Merge branch 'irq-pio'

Conflicts:

	drivers/scsi/libata-core.c
	include/linux/libata.h
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d23f002..35487e3 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -71,6 +71,7 @@
 	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
 
 	board_ahci		= 0,
 	board_ahci_vt8251	= 1,
@@ -128,15 +129,16 @@
 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
 
-	PORT_IRQ_FATAL		= PORT_IRQ_TF_ERR |
-				  PORT_IRQ_HBUS_ERR |
-				  PORT_IRQ_HBUS_DATA_ERR |
-				  PORT_IRQ_IF_ERR,
-	DEF_PORT_IRQ		= PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-				  PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-				  PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-				  PORT_IRQ_D2H_REG_FIS,
+	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+				  PORT_IRQ_IF_ERR |
+				  PORT_IRQ_CONNECT |
+				  PORT_IRQ_UNK_FIS,
+	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
+				  PORT_IRQ_TF_ERR |
+				  PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
@@ -197,13 +199,15 @@
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
 static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -237,14 +241,18 @@
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
-	.eng_timeout		= ahci_eng_timeout,
-
 	.irq_handler		= ahci_interrupt,
 	.irq_clear		= ahci_irq_clear,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
 
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
@@ -567,7 +575,7 @@
 
 	DPRINTK("ENTER\n");
 
-	if (!sata_dev_present(ap)) {
+	if (ata_port_offline(ap)) {
 		DPRINTK("PHY reports no device\n");
 		*class = ATA_DEV_NONE;
 		return 0;
@@ -597,7 +605,7 @@
 	/* restart engine */
 	ahci_start_engine(ap);
 
-	ata_tf_init(ap, &tf, 0);
+	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
@@ -640,7 +648,7 @@
 	msleep(150);
 
 	*class = ATA_DEV_NONE;
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 			rc = -EIO;
 			reason = "device not ready";
@@ -655,8 +663,7 @@
  fail_restart:
 	ahci_start_engine(ap);
  fail:
-	printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-	       ap->id, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
@@ -670,7 +677,7 @@
 	rc = sata_std_hardreset(ap, class);
 	ahci_start_engine(ap);
 
-	if (rc == 0)
+	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
@@ -790,109 +797,108 @@
 	ahci_fill_cmd_slot(pp, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
 
-	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-		printk(KERN_WARNING "ata%u: port reset, "
-		       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-			ap->id,
-			irq_stat,
-			readl(mmio + HOST_IRQ_STAT),
-			readl(port_mmio + PORT_IRQ_STAT),
-			readl(port_mmio + PORT_CMD),
-			readl(port_mmio + PORT_TFDATA),
-			readl(port_mmio + PORT_SCR_STAT),
-			readl(port_mmio + PORT_SCR_ERR));
+	ata_ehi_clear_desc(ehi);
 
-	/* stop DMA */
-	ahci_stop_engine(ap);
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
-	/* clear SATA phy error, if any */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
+	/* analyze @irq_stat */
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 
-	/* if DRQ/BSY is set, device needs to be reset.
-	 * if so, issue COMRESET
-	 */
-	tmp = readl(port_mmio + PORT_TFDATA);
-	if (tmp & (ATA_BUSY | ATA_DRQ)) {
-		writel(0x301, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-		udelay(10);
-		writel(0x300, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
+	if (irq_stat & PORT_IRQ_TF_ERR)
+		err_mask |= AC_ERR_DEV;
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		err_mask |= AC_ERR_HOST_BUS;
+		action |= ATA_EH_SOFTRESET;
 	}
 
-	/* re-start DMA */
-	ahci_start_engine(ap);
-}
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", interface fatal error");
+	}
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+			"connection status changed" : "PHY RDY changed");
+	}
 
-	printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
-	spin_lock_irqsave(&host_set->lock, flags);
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
 
-	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
+
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	qc->err_mask |= AC_ERR_TIMEOUT;
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
-
-	ata_eh_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 status, serr, ci;
-
-	serr = readl(port_mmio + PORT_SCR_ERR);
-	writel(serr, port_mmio + PORT_SCR_ERR);
+	struct ata_queued_cmd *qc;
+	u32 status, ci;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	ci = readl(port_mmio + PORT_CMD_ISSUE);
-	if (likely((ci & 0x1) == 0)) {
-		if (qc) {
-			WARN_ON(qc->err_mask);
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
+	}
+
+	if ((qc = ata_qc_from_tag(ap, ap->active_tag))) {
+		ci = readl(port_mmio + PORT_CMD_ISSUE);
+		if ((ci & 0x1) == 0) {
 			ata_qc_complete(qc);
-			qc = NULL;
+			return;
 		}
 	}
 
-	if (status & PORT_IRQ_FATAL) {
-		unsigned int err_mask;
-		if (status & PORT_IRQ_TF_ERR)
-			err_mask = AC_ERR_DEV;
-		else if (status & PORT_IRQ_IF_ERR)
-			err_mask = AC_ERR_ATA_BUS;
-		else
-			err_mask = AC_ERR_HOST_BUS;
+	/* hmmm... a spurious interrupt */
 
-		/* command processing has stopped due to error; restart */
-		ahci_restart_port(ap, status);
+	/* ignore interim PIO setup FIS interrupts */
+	if (ata_tag_valid(ap->active_tag)) {
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->active_tag);
 
-		if (qc) {
-			qc->err_mask |= err_mask;
-			ata_qc_complete(qc);
-		}
+		if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+		    (status & PORT_IRQ_PIOS_FIS))
+			return;
 	}
 
-	return 1;
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+				"(irq_stat 0x%x active_tag %d)\n",
+				status, ap->active_tag);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
@@ -929,14 +935,7 @@
 
 		ap = host_set->ports[i];
 		if (ap) {
-			struct ata_queued_cmd *qc;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit())
-					dev_printk(KERN_WARNING, host_set->dev,
-					  "unhandled interrupt on port %u\n",
-					  i);
-
+			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -953,7 +952,7 @@
 		handled = 1;
 	}
 
-        spin_unlock(&host_set->lock);
+	spin_unlock(&host_set->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -971,6 +970,56 @@
 	return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ahci_softreset, ahci_hardreset, ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
 			    unsigned int port_idx)
 {
@@ -1115,9 +1164,6 @@
 			writel(tmp, port_mmio + PORT_IRQ_STAT);
 
 		writel(1 << i, mmio + HOST_IRQ_STAT);
-
-		/* set irq mask (enables interrupts) */
-		writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 62dabf7..e3184a7 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -243,7 +243,10 @@
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -271,7 +274,10 @@
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -484,7 +490,7 @@
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
 		return 0;
 	}
 
@@ -565,7 +571,7 @@
 static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	if (!piix_sata_probe(ap)) {
-		printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
 		return 0;
 	}
 
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0..49eff18 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,150 @@
 	ata_altstatus(ap);        /* dummy read */
 }
 
+/**
+ *	ata_bmdma_freeze - Freeze BMDMA controller port
+ *	@ap: port to freeze
+ *
+ *	Freeze BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_freeze(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
+	else
+		outb(ap->ctl, ioaddr->ctl_addr);
+}
+
+/**
+ *	ata_bmdma_thaw - Thaw BMDMA controller port
+ *	@ap: port to thaw
+ *
+ *	Thaw BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_thaw(struct ata_port *ap)
+{
+	/* clear & re-enable interrupts */
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
+}
+
+/**
+ *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
+ *	@ap: port to handle error for
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Handle error for ATA BMDMA controller.  It can handle both
+ *	PATA and SATA controllers.  Many controllers should be able to
+ *	use this EH as-is or with some added handling before and
+ *	after.
+ *
+ *	This function is intended to be used for constructing
+ *	->error_handler callback by low level drivers.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_drive_eh(struct ata_port *ap, ata_reset_fn_t softreset,
+			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_host_set *host_set = ap->host_set;
+	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int thaw = 0;
+
+	qc = __ata_qc_from_tag(ap, ap->active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	ap->flags &= ~ATA_FLAG_NOINTR;
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
+		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
+		u8 host_stat;
+
+		host_stat = ata_bmdma_status(ap);
+
+		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out.  As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = 1;
+		}
+
+		ap->ops->bmdma_stop(qc);
+	}
+
+	ata_altstatus(ap);
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	/* PIO and DMA engines have been stopped, perform recovery */
+	ata_do_eh(ap, softreset, hardreset, postreset);
+}
+
+/**
+ *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for BMDMA controller.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t hardreset;
+
+	hardreset = NULL;
+	if (sata_scr_valid(ap))
+		hardreset = sata_std_hardreset;
+
+	ata_bmdma_drive_eh(ap, ata_std_softreset, hardreset, ata_std_postreset);
+}
+
+/**
+ *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
+ *				      BMDMA controller
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+}
+
 #ifdef CONFIG_PCI
 static struct ata_probe_ent *
 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5d38a6c..c859b96 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,13 +61,10 @@
 
 #include "libata.h"
 
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev,
-					u16 heads,
-					u16 sectors);
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-					 struct ata_device *dev);
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -412,11 +409,10 @@
 	return spd_str[spd - 1];
 }
 
-void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+void ata_dev_disable(struct ata_device *dev)
 {
 	if (ata_dev_enabled(dev)) {
-		printk(KERN_WARNING "ata%u: dev %u disabled\n",
-		       ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
 		dev->class++;
 	}
 }
@@ -955,13 +951,11 @@
 {
 	struct completion *waiting = qc->private_data;
 
-	qc->ap->ops->tf_read(qc->ap, &qc->tf);
 	complete(waiting);
 }
 
 /**
  *	ata_exec_internal - execute libata internal command
- *	@ap: Port to which the command is sent
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
  *	@cdb: CDB for packet command
@@ -979,24 +973,57 @@
  *	None.  Should be called with kernel context, might sleep.
  */
 
-unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+unsigned ata_exec_internal(struct ata_device *dev,
 			   struct ata_taskfile *tf, const u8 *cdb,
 			   int dma_dir, void *buf, unsigned int buflen)
 {
+	struct ata_port *ap = dev->ap;
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
+	unsigned int tag, preempted_tag;
 	DECLARE_COMPLETION(wait);
 	unsigned long flags;
 	unsigned int err_mask;
+	int rc;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
 
-	qc = ata_qc_new_init(ap, dev);
-	BUG_ON(qc == NULL);
+	/* no internal command while frozen */
+	if (ap->flags & ATA_FLAG_FROZEN) {
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		return AC_ERR_SYSTEM;
+	}
 
+	/* initialize internal qc */
+
+	/* XXX: Tag 0 is used for drivers with legacy EH as some
+	 * drivers choke if any other tag is given.  This breaks
+	 * ata_tag_internal() test for those drivers.  Don't use new
+	 * EH stuff without converting to it.
+	 */
+	if (ap->ops->error_handler)
+		tag = ATA_TAG_INTERNAL;
+	else
+		tag = 0;
+
+	if (test_and_set_bit(tag, &ap->qactive))
+		BUG();
+	qc = __ata_qc_from_tag(ap, tag);
+
+	qc->tag = tag;
+	qc->scsicmd = NULL;
+	qc->ap = ap;
+	qc->dev = dev;
+	ata_qc_reinit(qc);
+
+	preempted_tag = ap->active_tag;
+	ap->active_tag = ATA_TAG_POISON;
+
+	/* prepare & issue qc */
 	qc->tf = *tf;
 	if (cdb)
 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		ata_sg_init_one(qc, buf, buflen);
@@ -1010,31 +1037,51 @@
 
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
-	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
-		ata_port_flush_task(ap);
+	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
+	ata_port_flush_task(ap);
+
+	if (!rc) {
 		spin_lock_irqsave(&ap->host_set->lock, flags);
 
 		/* We're racing with irq here.  If we lose, the
 		 * following test prevents us from completing the qc
-		 * again.  If completion irq occurs after here but
-		 * before the caller cleans up, it will result in a
-		 * spurious interrupt.  We can live with that.
+		 * twice.  If we win, the port is frozen and will be
+		 * cleaned up by ->post_internal_cmd().
 		 */
 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
-			qc->err_mask = AC_ERR_TIMEOUT;
-			ata_qc_complete(qc);
-			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
-			       ap->id, command);
+			qc->err_mask |= AC_ERR_TIMEOUT;
+
+			if (ap->ops->error_handler)
+				ata_port_freeze(ap);
+			else
+				ata_qc_complete(qc);
+
+			ata_dev_printk(dev, KERN_WARNING,
+				       "qc timeout (cmd 0x%x)\n", command);
 		}
 
 		spin_unlock_irqrestore(&ap->host_set->lock, flags);
 	}
 
-	*tf = qc->tf;
+	/* do post_internal_cmd */
+	if (ap->ops->post_internal_cmd)
+		ap->ops->post_internal_cmd(qc);
+
+	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
+		ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
+			       "internal command, assuming AC_ERR_OTHER\n");
+		qc->err_mask |= AC_ERR_OTHER;
+	}
+
+	/* finish up */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+
+	*tf = qc->result_tf;
 	err_mask = qc->err_mask;
 
 	ata_qc_free(qc);
+	ap->active_tag = preempted_tag;
 
 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
 	 * Until those drivers are fixed, we detect the condition
@@ -1052,6 +1099,8 @@
 		ata_port_probe(ap);
 	}
 
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
 	return err_mask;
 }
 
@@ -1090,11 +1139,10 @@
 
 /**
  *	ata_dev_read_id - Read ID data from the specified device
- *	@ap: port on which target device resides
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
  *	@post_reset: is this read ID post-reset?
- *	@p_id: read IDENTIFY page (newly allocated)
+ *	@id: buffer to read IDENTIFY data into
  *
  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1107,13 +1155,13 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
-			   unsigned int *p_class, int post_reset, u16 **p_id)
+static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+			   int post_reset, u16 *id)
 {
+	struct ata_port *ap = dev->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
-	u16 *id;
 	const char *reason;
 	int rc;
 
@@ -1121,15 +1169,8 @@
 
 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 
-	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
-	if (id == NULL) {
-		rc = -ENOMEM;
-		reason = "out of memory";
-		goto err_out;
-	}
-
  retry:
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 
 	switch (class) {
 	case ATA_DEV_ATA:
@@ -1146,7 +1187,7 @@
 
 	tf.protocol = ATA_PROT_PIO;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
 		rc = -EIO;
@@ -1173,7 +1214,7 @@
 		 * Some drives were very specific about that exact sequence.
 		 */
 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
-			err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
+			err_mask = ata_dev_init_params(dev, id[3], id[6]);
 			if (err_mask) {
 				rc = -EIO;
 				reason = "INIT_DEV_PARAMS failed";
@@ -1189,25 +1230,22 @@
 	}
 
 	*p_class = class;
-	*p_id = id;
+
 	return 0;
 
  err_out:
-	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
-	       ap->id, dev->devno, reason);
-	kfree(id);
+	ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
+		       "(%s, err_mask=0x%x)\n", reason, err_mask);
 	return rc;
 }
 
-static inline u8 ata_dev_knobble(const struct ata_port *ap,
-				 struct ata_device *dev)
+static inline u8 ata_dev_knobble(struct ata_device *dev)
 {
-	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
 }
 
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
- *	@ap: Port on which target device resides
  *	@dev: Target device to configure
  *	@print_info: Enable device info printout
  *
@@ -1220,9 +1258,9 @@
  *	RETURNS:
  *	0 on success, -errno otherwise
  */
-static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
-			     int print_info)
+static int ata_dev_configure(struct ata_device *dev, int print_info)
 {
+	struct ata_port *ap = dev->ap;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
 	int i, rc;
@@ -1237,10 +1275,10 @@
 
 	/* print device capabilities */
 	if (print_info)
-		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
-		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
-		       ap->id, dev->devno, id[49], id[82], id[83],
-		       id[84], id[85], id[86], id[87], id[88]);
+		ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
+			       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+			       id[49], id[82], id[83], id[84],
+			       id[85], id[86], id[87], id[88]);
 
 	/* initialize to-be-configured parameters */
 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -1276,13 +1314,12 @@
 
 			/* print device info to dmesg */
 			if (print_info)
-				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-				       "max %s, %Lu sectors: %s\n",
-				       ap->id, dev->devno,
-				       ata_id_major_version(id),
-				       ata_mode_string(xfer_mask),
-				       (unsigned long long)dev->n_sectors,
-				       lba_desc);
+				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+					"max %s, %Lu sectors: %s\n",
+					ata_id_major_version(id),
+					ata_mode_string(xfer_mask),
+					(unsigned long long)dev->n_sectors,
+					lba_desc);
 		} else {
 			/* CHS */
 
@@ -1300,13 +1337,12 @@
 
 			/* print device info to dmesg */
 			if (print_info)
-				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
-				       ap->id, dev->devno,
-				       ata_id_major_version(id),
-				       ata_mode_string(xfer_mask),
-				       (unsigned long long)dev->n_sectors,
-				       dev->cylinders, dev->heads, dev->sectors);
+				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+					"max %s, %Lu sectors: CHS %u/%u/%u\n",
+					ata_id_major_version(id),
+					ata_mode_string(xfer_mask),
+					(unsigned long long)dev->n_sectors,
+					dev->cylinders, dev->heads, dev->sectors);
 		}
 
 		if (dev->id[59] & 0x100) {
@@ -1324,7 +1360,8 @@
 
 		rc = atapi_cdb_len(id);
 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
-			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
+			ata_dev_printk(dev, KERN_WARNING,
+				       "unsupported CDB len\n");
 			rc = -EINVAL;
 			goto err_out_nosup;
 		}
@@ -1337,9 +1374,9 @@
 
 		/* print device info to dmesg */
 		if (print_info)
-			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
-			       ap->id, dev->devno, ata_mode_string(xfer_mask),
-			       cdb_intr_string);
+			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
+				       ata_mode_string(xfer_mask),
+				       cdb_intr_string);
 	}
 
 	ap->host->max_cmd_len = 0;
@@ -1349,10 +1386,10 @@
 					      ap->device[i].cdb_len);
 
 	/* limit bridge transfers to udma5, 200 sectors */
-	if (ata_dev_knobble(ap, dev)) {
+	if (ata_dev_knobble(dev)) {
 		if (print_info)
-			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
-			       ap->id, dev->devno);
+			ata_dev_printk(dev, KERN_INFO,
+				       "applying bridge limits\n");
 		dev->udma_mask &= ATA_UDMA5;
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
@@ -1405,15 +1442,18 @@
 	if (ap->ops->probe_reset) {
 		rc = ap->ops->probe_reset(ap, classes);
 		if (rc) {
-			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
+			ata_port_printk(ap, KERN_ERR,
+					"reset failed (errno=%d)\n", rc);
 			return rc;
 		}
 	} else {
 		ap->ops->phy_reset(ap);
 
-		if (!(ap->flags & ATA_FLAG_DISABLED))
-			for (i = 0; i < ATA_MAX_DEVICES; i++)
+		for (i = 0; i < ATA_MAX_DEVICES; i++) {
+			if (!(ap->flags & ATA_FLAG_DISABLED))
 				classes[i] = ap->device[i].class;
+			ap->device[i].class = ATA_DEV_UNKNOWN;
+		}
 
 		ata_port_probe(ap);
 	}
@@ -1432,32 +1472,17 @@
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		kfree(dev->id);
-		dev->id = NULL;
-		rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
+		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
 		if (rc)
 			goto fail;
 
-		rc = ata_dev_configure(ap, dev, 1);
+		rc = ata_dev_configure(dev, 1);
 		if (rc)
 			goto fail;
 	}
 
 	/* configure transfer mode */
-	if (ap->ops->set_mode) {
-		/* FIXME: make ->set_mode handle no device case and
-		 * return error code and failing device on failure as
-		 * ata_set_mode() does.
-		 */
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			if (ata_dev_enabled(&ap->device[i])) {
-				ap->ops->set_mode(ap);
-				break;
-			}
-		rc = 0;
-	} else
-		rc = ata_set_mode(ap, &dev);
-
+	rc = ata_set_mode(ap, &dev);
 	if (rc) {
 		down_xfermask = 1;
 		goto fail;
@@ -1479,18 +1504,18 @@
 		tries[dev->devno] = 0;
 		break;
 	case -EIO:
-		ata_down_sata_spd_limit(ap);
+		sata_down_spd_limit(ap);
 		/* fall through */
 	default:
 		tries[dev->devno]--;
 		if (down_xfermask &&
-		    ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
+		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
 			tries[dev->devno] = 0;
 	}
 
 	if (!tries[dev->devno]) {
-		ata_down_xfermask_limit(ap, dev, 1);
-		ata_dev_disable(ap, dev);
+		ata_down_xfermask_limit(dev, 1);
+		ata_dev_disable(dev);
 	}
 
 	goto retry;
@@ -1525,21 +1550,19 @@
 {
 	u32 sstatus, scontrol, tmp;
 
-	if (!ap->ops->scr_read)
+	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
 		return;
+	sata_scr_read(ap, SCR_CONTROL, &scontrol);
 
-	sstatus = scr_read(ap, SCR_STATUS);
-	scontrol = scr_read(ap, SCR_CONTROL);
-
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		tmp = (sstatus >> 4) & 0xf;
-		printk(KERN_INFO
-		       "ata%u: SATA link up %s (SStatus %X SControl %X)\n",
-		       ap->id, sata_spd_string(tmp), sstatus, scontrol);
+		ata_port_printk(ap, KERN_INFO,
+				"SATA link up %s (SStatus %X SControl %X)\n",
+				sata_spd_string(tmp), sstatus, scontrol);
 	} else {
-		printk(KERN_INFO
-		       "ata%u: SATA link down (SStatus %X SControl %X)\n",
-		       ap->id, sstatus, scontrol);
+		ata_port_printk(ap, KERN_INFO,
+				"SATA link down (SStatus %X SControl %X)\n",
+				sstatus, scontrol);
 	}
 }
 
@@ -1562,17 +1585,18 @@
 
 	if (ap->flags & ATA_FLAG_SATA_RESET) {
 		/* issue phy wake/reset */
-		scr_write_flush(ap, SCR_CONTROL, 0x301);
+		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
 		/* Couldn't find anything in SATA I/II specs, but
 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
 		mdelay(1);
 	}
-	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
+	/* phy wake/clear reset */
+	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 
 	/* wait for phy to become ready, if necessary */
 	do {
 		msleep(200);
-		sstatus = scr_read(ap, SCR_STATUS);
+		sata_scr_read(ap, SCR_STATUS, &sstatus);
 		if ((sstatus & 0xf) != 1)
 			break;
 	} while (time_before(jiffies, timeout));
@@ -1581,7 +1605,7 @@
 	sata_print_link_status(ap);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
-	if (sata_dev_present(ap))
+	if (!ata_port_offline(ap))
 		ata_port_probe(ap);
 	else
 		ata_port_disable(ap);
@@ -1618,15 +1642,15 @@
 
 /**
  *	ata_dev_pair		-	return other device on cable
- *	@ap: port
  *	@adev: device
  *
  *	Obtain the other device on the same cable, or if none is
  *	present NULL is returned
  */
 
-struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
+struct ata_device *ata_dev_pair(struct ata_device *adev)
 {
+	struct ata_port *ap = adev->ap;
 	struct ata_device *pair = &ap->device[1 - adev->devno];
 	if (!ata_dev_enabled(pair))
 		return NULL;
@@ -1654,12 +1678,12 @@
 }
 
 /**
- *	ata_down_sata_spd_limit - adjust SATA spd limit downward
+ *	sata_down_spd_limit - adjust SATA spd limit downward
  *	@ap: Port to adjust SATA spd limit for
  *
  *	Adjust SATA spd limit of @ap downward.  Note that this
  *	function only adjusts the limit.  The change must be applied
- *	using ata_set_sata_spd().
+ *	using sata_set_spd().
  *
  *	LOCKING:
  *	Inherited from caller.
@@ -1667,13 +1691,14 @@
  *	RETURNS:
  *	0 on success, negative errno on failure
  */
-int ata_down_sata_spd_limit(struct ata_port *ap)
+int sata_down_spd_limit(struct ata_port *ap)
 {
-	u32 spd, mask;
-	int highbit;
+	u32 sstatus, spd, mask;
+	int rc, highbit;
 
-	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
-		return -EOPNOTSUPP;
+	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
+	if (rc)
+		return rc;
 
 	mask = ap->sata_spd_limit;
 	if (mask <= 1)
@@ -1681,7 +1706,7 @@
 	highbit = fls(mask) - 1;
 	mask &= ~(1 << highbit);
 
-	spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
+	spd = (sstatus >> 4) & 0xf;
 	if (spd <= 1)
 		return -EINVAL;
 	spd--;
@@ -1691,13 +1716,13 @@
 
 	ap->sata_spd_limit = mask;
 
-	printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
-	       ap->id, sata_spd_string(fls(mask)));
+	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
+			sata_spd_string(fls(mask)));
 
 	return 0;
 }
 
-static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
+static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
 {
 	u32 spd, limit;
 
@@ -1713,7 +1738,7 @@
 }
 
 /**
- *	ata_set_sata_spd_needed - is SATA spd configuration needed
+ *	sata_set_spd_needed - is SATA spd configuration needed
  *	@ap: Port in question
  *
  *	Test whether the spd limit in SControl matches
@@ -1727,20 +1752,18 @@
  *	RETURNS:
  *	1 if SATA spd configuration is needed, 0 otherwise.
  */
-int ata_set_sata_spd_needed(struct ata_port *ap)
+int sata_set_spd_needed(struct ata_port *ap)
 {
 	u32 scontrol;
 
-	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
 		return 0;
 
-	scontrol = scr_read(ap, SCR_CONTROL);
-
-	return __ata_set_sata_spd_needed(ap, &scontrol);
+	return __sata_set_spd_needed(ap, &scontrol);
 }
 
 /**
- *	ata_set_sata_spd - set SATA spd according to spd limit
+ *	sata_set_spd - set SATA spd according to spd limit
  *	@ap: Port to set SATA spd for
  *
  *	Set SATA spd of @ap according to sata_spd_limit.
@@ -1750,20 +1773,22 @@
  *
  *	RETURNS:
  *	0 if spd doesn't need to be changed, 1 if spd has been
- *	changed.  -EOPNOTSUPP if SCR registers are inaccessible.
+ *	changed.  Negative errno if SCR registers are inaccessible.
  */
-int ata_set_sata_spd(struct ata_port *ap)
+int sata_set_spd(struct ata_port *ap)
 {
 	u32 scontrol;
+	int rc;
 
-	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
-		return -EOPNOTSUPP;
+	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		return rc;
 
-	scontrol = scr_read(ap, SCR_CONTROL);
-	if (!__ata_set_sata_spd_needed(ap, &scontrol))
+	if (!__sata_set_spd_needed(ap, &scontrol))
 		return 0;
 
-	scr_write(ap, SCR_CONTROL, scontrol);
+	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+		return rc;
+
 	return 1;
 }
 
@@ -1917,7 +1942,6 @@
 
 /**
  *	ata_down_xfermask_limit - adjust dev xfer masks downward
- *	@ap: Port associated with device @dev
  *	@dev: Device to adjust xfer masks
  *	@force_pio0: Force PIO0
  *
@@ -1931,8 +1955,7 @@
  *	RETURNS:
  *	0 on success, negative errno on failure
  */
-int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
-			    int force_pio0)
+int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
 {
 	unsigned long xfer_mask;
 	int highbit;
@@ -1956,8 +1979,8 @@
 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
 			    &dev->udma_mask);
 
-	printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
-	       ap->id, dev->devno, ata_mode_string(xfer_mask));
+	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
+		       ata_mode_string(xfer_mask));
 
 	return 0;
 
@@ -1965,7 +1988,7 @@
 	return -EINVAL;
 }
 
-static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+static int ata_dev_set_mode(struct ata_device *dev)
 {
 	unsigned int err_mask;
 	int rc;
@@ -1974,24 +1997,22 @@
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
-	err_mask = ata_dev_set_xfermode(ap, dev);
+	err_mask = ata_dev_set_xfermode(dev);
 	if (err_mask) {
-		printk(KERN_ERR
-		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
-		       ap->id, err_mask);
+		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
+			       "(err_mask=0x%x)\n", err_mask);
 		return -EIO;
 	}
 
-	rc = ata_dev_revalidate(ap, dev, 0);
+	rc = ata_dev_revalidate(dev, 0);
 	if (rc)
 		return rc;
 
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
 		dev->xfer_shift, (int)dev->xfer_mode);
 
-	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
-	       ap->id, dev->devno,
-	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
+	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
 	return 0;
 }
 
@@ -2015,6 +2036,20 @@
 	struct ata_device *dev;
 	int i, rc = 0, used_dma = 0, found = 0;
 
+	/* has private set_mode? */
+	if (ap->ops->set_mode) {
+		/* FIXME: make ->set_mode handle no device case and
+		 * return error code and failing device on failure.
+		 */
+		for (i = 0; i < ATA_MAX_DEVICES; i++) {
+			if (ata_dev_enabled(&ap->device[i])) {
+				ap->ops->set_mode(ap);
+				break;
+			}
+		}
+		return 0;
+	}
+
 	/* step 1: calculate xfer_mask */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		unsigned int pio_mask, dma_mask;
@@ -2024,7 +2059,7 @@
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		ata_dev_xfermask(ap, dev);
+		ata_dev_xfermask(dev);
 
 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
@@ -2045,8 +2080,7 @@
 			continue;
 
 		if (!dev->pio_mode) {
-			printk(KERN_WARNING "ata%u: dev %u no PIO support\n",
-			       ap->id, dev->devno);
+			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
 			rc = -EINVAL;
 			goto out;
 		}
@@ -2077,7 +2111,7 @@
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		rc = ata_dev_set_mode(ap, dev);
+		rc = ata_dev_set_mode(dev);
 		if (rc)
 			goto out;
 	}
@@ -2145,8 +2179,8 @@
 	}
 
 	if (status & ATA_BUSY)
-		printk(KERN_WARNING "ata%u is slow to respond, "
-		       "please be patient\n", ap->id);
+		ata_port_printk(ap, KERN_WARNING,
+				"port is slow to respond, please be patient\n");
 
 	timeout = timer_start + tmout;
 	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -2155,8 +2189,8 @@
 	}
 
 	if (status & ATA_BUSY) {
-		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
-		       ap->id, tmout / HZ);
+		ata_port_printk(ap, KERN_ERR, "port failed to respond "
+				"(%lu secs)\n", tmout / HZ);
 		return 1;
 	}
 
@@ -2249,7 +2283,7 @@
 	 * pulldown resistor.
 	 */
 	if (ata_check_status(ap) == 0xFF) {
-		printk(KERN_ERR "ata%u: SRST failed (status 0xFF)\n", ap->id);
+		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
 		return AC_ERR_OTHER;
 	}
 
@@ -2343,7 +2377,7 @@
 	return;
 
 err_out:
-	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
+	ata_port_printk(ap, KERN_ERR, "disabling port\n");
 	ap->ops->port_disable(ap);
 
 	DPRINTK("EXIT\n");
@@ -2353,20 +2387,26 @@
 {
 	unsigned long timeout = jiffies + (HZ * 5);
 	u32 scontrol, sstatus;
+	int rc;
 
-	scontrol = scr_read(ap, SCR_CONTROL);
+	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		return rc;
+
 	scontrol = (scontrol & 0x0f0) | 0x300;
-	scr_write_flush(ap, SCR_CONTROL, scontrol);
+
+	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+		return rc;
 
 	/* Wait for phy to become ready, if necessary. */
 	do {
 		msleep(200);
-		sstatus = scr_read(ap, SCR_STATUS);
+		if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
+			return rc;
 		if ((sstatus & 0xf) != 1)
 			return 0;
 	} while (time_before(jiffies, timeout));
 
-	return -1;
+	return -EBUSY;
 }
 
 /**
@@ -2384,22 +2424,20 @@
  */
 void ata_std_probeinit(struct ata_port *ap)
 {
-	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
-		u32 spd;
+	u32 scontrol;
 
-		/* set cable type and resume link */
-		ap->cbl = ATA_CBL_SATA;
-		sata_phy_resume(ap);
+	/* resume link */
+	sata_phy_resume(ap);
 
-		/* init sata_spd_limit to the current value */
-		spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
-		if (spd)
-			ap->sata_spd_limit &= (1 << spd) - 1;
-
-		/* wait for device */
-		if (sata_dev_present(ap))
-			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	/* init sata_spd_limit to the current value */
+	if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
+		int spd = (scontrol >> 4) & 0xf;
+		ap->sata_spd_limit &= (1 << spd) - 1;
 	}
+
+	/* wait for device */
+	if (ata_port_online(ap))
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 }
 
 /**
@@ -2424,7 +2462,7 @@
 
 	DPRINTK("ENTER\n");
 
-	if (ap->ops->scr_read && !sata_dev_present(ap)) {
+	if (ata_port_offline(ap)) {
 		classes[0] = ATA_DEV_NONE;
 		goto out;
 	}
@@ -2442,8 +2480,8 @@
 	DPRINTK("about to softreset, devmask=%x\n", devmask);
 	err_mask = ata_bus_softreset(ap, devmask);
 	if (err_mask) {
-		printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
-		       ap->id, err_mask);
+		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+				err_mask);
 		return -EIO;
 	}
 
@@ -2475,26 +2513,35 @@
 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
 {
 	u32 scontrol;
+	int rc;
 
 	DPRINTK("ENTER\n");
 
-	if (ata_set_sata_spd_needed(ap)) {
+	if (sata_set_spd_needed(ap)) {
 		/* SATA spec says nothing about how to reconfigure
 		 * spd.  To be on the safe side, turn off phy during
 		 * reconfiguration.  This works for at least ICH7 AHCI
 		 * and Sil3124.
 		 */
-		scontrol = scr_read(ap, SCR_CONTROL);
-		scontrol = (scontrol & 0x0f0) | 0x302;
-		scr_write_flush(ap, SCR_CONTROL, scontrol);
+		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+			return rc;
 
-		ata_set_sata_spd(ap);
+		scontrol = (scontrol & 0x0f0) | 0x302;
+
+		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+			return rc;
+
+		sata_set_spd(ap);
 	}
 
 	/* issue phy wake/reset */
-	scontrol = scr_read(ap, SCR_CONTROL);
+	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		return rc;
+
 	scontrol = (scontrol & 0x0f0) | 0x301;
-	scr_write_flush(ap, SCR_CONTROL, scontrol);
+
+	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
+		return rc;
 
 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 	 * 10.4.2 says at least 1 ms.
@@ -2505,15 +2552,15 @@
 	sata_phy_resume(ap);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
-	if (!sata_dev_present(ap)) {
+	if (ata_port_offline(ap)) {
 		*class = ATA_DEV_NONE;
 		DPRINTK("EXIT, link offline\n");
 		return 0;
 	}
 
 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
-		printk(KERN_ERR
-		       "ata%u: COMRESET failed (device not ready)\n", ap->id);
+		ata_port_printk(ap, KERN_ERR,
+				"COMRESET failed (device not ready)\n");
 		return -EIO;
 	}
 
@@ -2542,15 +2589,23 @@
  */
 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
 {
+	u32 serror;
+
 	DPRINTK("ENTER\n");
 
 	/* print link status */
-	if (ap->cbl == ATA_CBL_SATA)
-		sata_print_link_status(ap);
+	sata_print_link_status(ap);
+
+	/* clear SError */
+	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
+		sata_scr_write(ap, SCR_ERROR, serror);
 
 	/* re-enable interrupts */
-	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
-		ata_irq_on(ap);
+	if (!ap->ops->error_handler) {
+		/* FIXME: hack. create a hook instead */
+		if (ap->ioaddr.ctl_addr)
+			ata_irq_on(ap);
+	}
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
@@ -2593,7 +2648,7 @@
 	ata_reset_fn_t hardreset;
 
 	hardreset = NULL;
-	if (ap->cbl == ATA_CBL_SATA && ap->ops->scr_read)
+	if (sata_scr_valid(ap))
 		hardreset = sata_std_hardreset;
 
 	return ata_drive_probe_reset(ap, ata_std_probeinit,
@@ -2602,7 +2657,7 @@
 }
 
 int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
-		 ata_postreset_fn_t postreset, unsigned int *classes)
+		 unsigned int *classes)
 {
 	int i, rc;
 
@@ -2626,9 +2681,6 @@
 			if (classes[i] == ATA_DEV_UNKNOWN)
 				classes[i] = ATA_DEV_NONE;
 
-	if (postreset)
-		postreset(ap, classes);
-
 	return 0;
 }
 
@@ -2668,15 +2720,17 @@
 {
 	int rc = -EINVAL;
 
+	ata_eh_freeze_port(ap);
+
 	if (probeinit)
 		probeinit(ap);
 
-	if (softreset && !ata_set_sata_spd_needed(ap)) {
-		rc = ata_do_reset(ap, softreset, postreset, classes);
+	if (softreset && !sata_set_spd_needed(ap)) {
+		rc = ata_do_reset(ap, softreset, classes);
 		if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
 			goto done;
-		printk(KERN_INFO "ata%u: softreset failed, will try "
-		       "hardreset in 5 secs\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "softreset failed, "
+				"will try hardreset in 5 secs\n");
 		ssleep(5);
 	}
 
@@ -2684,39 +2738,45 @@
 		goto done;
 
 	while (1) {
-		rc = ata_do_reset(ap, hardreset, postreset, classes);
+		rc = ata_do_reset(ap, hardreset, classes);
 		if (rc == 0) {
 			if (classes[0] != ATA_DEV_UNKNOWN)
 				goto done;
 			break;
 		}
 
-		if (ata_down_sata_spd_limit(ap))
+		if (sata_down_spd_limit(ap))
 			goto done;
 
-		printk(KERN_INFO "ata%u: hardreset failed, will retry "
-		       "in 5 secs\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "hardreset failed, "
+				"will retry in 5 secs\n");
 		ssleep(5);
 	}
 
 	if (softreset) {
-		printk(KERN_INFO "ata%u: hardreset succeeded without "
-		       "classification, will retry softreset in 5 secs\n",
-		       ap->id);
+		ata_port_printk(ap, KERN_INFO,
+				"hardreset succeeded without classification, "
+				"will retry softreset in 5 secs\n");
 		ssleep(5);
 
-		rc = ata_do_reset(ap, softreset, postreset, classes);
+		rc = ata_do_reset(ap, softreset, classes);
 	}
 
  done:
-	if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
-		rc = -ENODEV;
+	if (rc == 0) {
+		if (postreset)
+			postreset(ap, classes);
+
+		ata_eh_thaw_port(ap);
+
+		if (classes[0] == ATA_DEV_UNKNOWN)
+			rc = -ENODEV;
+	}
 	return rc;
 }
 
 /**
  *	ata_dev_same_device - Determine whether new ID matches configured device
- *	@ap: port on which the device to compare against resides
  *	@dev: device to compare against
  *	@new_class: class of the new device
  *	@new_id: IDENTIFY page of the new device
@@ -2731,17 +2791,16 @@
  *	RETURNS:
  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
  */
-static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
-			       unsigned int new_class, const u16 *new_id)
+static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
+			       const u16 *new_id)
 {
 	const u16 *old_id = dev->id;
 	unsigned char model[2][41], serial[2][21];
 	u64 new_n_sectors;
 
 	if (dev->class != new_class) {
-		printk(KERN_INFO
-		       "ata%u: dev %u class mismatch %d != %d\n",
-		       ap->id, dev->devno, dev->class, new_class);
+		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
+			       dev->class, new_class);
 		return 0;
 	}
 
@@ -2752,24 +2811,22 @@
 	new_n_sectors = ata_id_n_sectors(new_id);
 
 	if (strcmp(model[0], model[1])) {
-		printk(KERN_INFO
-		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
-		       ap->id, dev->devno, model[0], model[1]);
+		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
+			       "'%s' != '%s'\n", model[0], model[1]);
 		return 0;
 	}
 
 	if (strcmp(serial[0], serial[1])) {
-		printk(KERN_INFO
-		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
-		       ap->id, dev->devno, serial[0], serial[1]);
+		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
+			       "'%s' != '%s'\n", serial[0], serial[1]);
 		return 0;
 	}
 
 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
-		printk(KERN_INFO
-		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
-		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
-		       (unsigned long long)new_n_sectors);
+		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+			       "%llu != %llu\n",
+			       (unsigned long long)dev->n_sectors,
+			       (unsigned long long)new_n_sectors);
 		return 0;
 	}
 
@@ -2778,7 +2835,6 @@
 
 /**
  *	ata_dev_revalidate - Revalidate ATA device
- *	@ap: port on which the device to revalidate resides
  *	@dev: device to revalidate
  *	@post_reset: is this revalidation after reset?
  *
@@ -2791,11 +2847,10 @@
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
-		       int post_reset)
+int ata_dev_revalidate(struct ata_device *dev, int post_reset)
 {
 	unsigned int class = dev->class;
-	u16 *id = NULL;
+	u16 *id = (void *)dev->ap->sector_buf;
 	int rc;
 
 	if (!ata_dev_enabled(dev)) {
@@ -2803,29 +2858,26 @@
 		goto fail;
 	}
 
-	/* allocate & read ID data */
-	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
+	/* read ID data */
+	rc = ata_dev_read_id(dev, &class, post_reset, id);
 	if (rc)
 		goto fail;
 
 	/* is the device still there? */
-	if (!ata_dev_same_device(ap, dev, class, id)) {
+	if (!ata_dev_same_device(dev, class, id)) {
 		rc = -ENODEV;
 		goto fail;
 	}
 
-	kfree(dev->id);
-	dev->id = id;
+	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
 
 	/* configure device according to the new ID */
-	rc = ata_dev_configure(ap, dev, 0);
+	rc = ata_dev_configure(dev, 0);
 	if (rc == 0)
 		return 0;
 
  fail:
-	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
-	       ap->id, dev->devno, rc);
-	kfree(id);
+	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
 	return rc;
 }
 
@@ -2901,7 +2953,6 @@
 
 /**
  *	ata_dev_xfermask - Compute supported xfermask of the given device
- *	@ap: Port on which the device to compute xfermask for resides
  *	@dev: Device to compute xfermask for
  *
  *	Compute supported xfermask of @dev and store it in
@@ -2916,8 +2967,9 @@
  *	LOCKING:
  *	None.
  */
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
+static void ata_dev_xfermask(struct ata_device *dev)
 {
+	struct ata_port *ap = dev->ap;
 	struct ata_host_set *hs = ap->host_set;
 	unsigned long xfer_mask;
 	int i;
@@ -2953,8 +3005,8 @@
 	}
 
 	if (ata_dma_blacklisted(dev))
-		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
-		       "disabling DMA\n", ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_WARNING,
+			       "device is on DMA blacklist, disabling DMA\n");
 
 	if (hs->flags & ATA_HOST_SIMPLEX) {
 		if (hs->simplex_claimed)
@@ -2970,7 +3022,6 @@
 
 /**
  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
- *	@ap: Port associated with device @dev
  *	@dev: Device to which command will be sent
  *
  *	Issue SET FEATURES - XFER MODE command to device @dev
@@ -2983,8 +3034,7 @@
  *	0 on success, AC_ERR_* mask otherwise.
  */
 
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-					 struct ata_device *dev)
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 {
 	struct ata_taskfile tf;
 	unsigned int err_mask;
@@ -2992,14 +3042,14 @@
 	/* set up set-features taskfile */
 	DPRINTK("set features - xfer mode\n");
 
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 	tf.command = ATA_CMD_SET_FEATURES;
 	tf.feature = SETFEATURES_XFER;
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = dev->xfer_mode;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -3007,8 +3057,9 @@
 
 /**
  *	ata_dev_init_params - Issue INIT DEV PARAMS command
- *	@ap: Port associated with device @dev
  *	@dev: Device to which command will be sent
+ *	@heads: Number of heads
+ *	@sectors: Number of sectors
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -3016,11 +3067,8 @@
  *	RETURNS:
  *	0 on success, AC_ERR_* mask otherwise.
  */
-
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev,
-					u16 heads,
-					u16 sectors)
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors)
 {
 	struct ata_taskfile tf;
 	unsigned int err_mask;
@@ -3032,14 +3080,14 @@
 	/* set up init dev params taskfile */
 	DPRINTK("init dev params \n");
 
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = sectors;
 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
 
-	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -3421,15 +3469,29 @@
  *	LOCKING:
  *	None.  (grabs host lock)
  */
-
 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ata_irq_on(ap);
-	ata_qc_complete(qc);
+
+	if (ap->ops->error_handler) {
+		/* EH might have kicked in while host_set lock is released */
+		qc = ata_qc_from_tag(ap, qc->tag);
+		if (qc) {
+			if (!(qc->err_mask & AC_ERR_HSM)) {
+				ata_irq_on(ap);
+				ata_qc_complete(qc);
+			} else
+				ata_port_freeze(ap);
+		}
+	} else {
+		/* old EH */
+		ata_irq_on(ap);
+		ata_qc_complete(qc);
+	}
+
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 }
 
@@ -3728,8 +3790,8 @@
 		unsigned int i;
 
 		if (words) /* warning if bytes > 1 */
-			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
-			       ap->id, bytes);
+			ata_dev_printk(qc->dev, KERN_WARNING,
+				       "%u bytes trailing data\n", bytes);
 
 		for (i = 0; i < words; i++)
 			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
@@ -3823,8 +3885,7 @@
 	return;
 
 err_out:
-	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
-	      ap->id, dev->devno);
+	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
 	qc->err_mask |= AC_ERR_HSM;
 	ap->hsm_task_state = HSM_ST_ERR;
 }
@@ -4134,9 +4195,14 @@
 	struct ata_queued_cmd *qc = NULL;
 	unsigned int i;
 
-	for (i = 0; i < ATA_MAX_QUEUE; i++)
+	/* no command while frozen */
+	if (unlikely(ap->flags & ATA_FLAG_FROZEN))
+		return NULL;
+
+	/* the last tag is reserved for the internal command. */
+	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
 		if (!test_and_set_bit(i, &ap->qactive)) {
-			qc = ata_qc_from_tag(ap, i);
+			qc = __ata_qc_from_tag(ap, i);
 			break;
 		}
 
@@ -4148,16 +4214,15 @@
 
 /**
  *	ata_qc_new_init - Request an available ATA command, and initialize it
- *	@ap: Port associated with device @dev
  *	@dev: Device from whom we request an available command structure
  *
  *	LOCKING:
  *	None.
  */
 
-struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
-				      struct ata_device *dev)
+struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
 {
+	struct ata_port *ap = dev->ap;
 	struct ata_queued_cmd *qc;
 
 	qc = ata_qc_new(ap);
@@ -4192,8 +4257,6 @@
 	qc->flags = 0;
 	tag = qc->tag;
 	if (likely(ata_tag_valid(tag))) {
-		if (tag == ap->active_tag)
-			ap->active_tag = ATA_TAG_POISON;
 		qc->tag = ATA_TAG_POISON;
 		clear_bit(tag, &ap->qactive);
 	}
@@ -4207,6 +4270,9 @@
 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
 		ata_sg_clean(qc);
 
+	/* command should be marked inactive atomically with qc completion */
+	qc->ap->active_tag = ATA_TAG_POISON;
+
 	/* atapi: mark qc as inactive to prevent the interrupt handler
 	 * from completing the command twice later, before the error handler
 	 * is called. (when rc != 0 and atapi request sense is needed)
@@ -4217,6 +4283,66 @@
 	qc->complete_fn(qc);
 }
 
+/**
+ *	ata_qc_complete - Complete an active ATA command
+ *	@qc: Command to complete
+ *
+ *	Indicate to the mid and upper layers that an ATA
+ *	command has completed, with either an ok or not-ok status.
+ *	The result taskfile is read back if requested or on failure.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* XXX: New EH and old EH use different mechanisms to
+	 * synchronize EH with regular execution path.
+	 *
+	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
+	 * Normal execution path is responsible for not accessing a
+	 * failed qc.  libata core enforces the rule by returning NULL
+	 * from ata_qc_from_tag() for failed qcs.
+	 *
+	 * Old EH depends on ata_qc_complete() nullifying completion
+	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
+	 * not synchronize with interrupt handler.  Only PIO task is
+	 * taken care of.
+	 */
+	if (ap->ops->error_handler) {
+		WARN_ON(ap->flags & ATA_FLAG_FROZEN);
+
+		if (unlikely(qc->err_mask))
+			qc->flags |= ATA_QCFLAG_FAILED;
+
+		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+			if (!ata_tag_internal(qc->tag)) {
+				/* always fill result TF for failed qc */
+				ap->ops->tf_read(ap, &qc->result_tf);
+				ata_qc_schedule_eh(qc);
+				return;
+			}
+		}
+
+		/* read result TF if requested */
+		if (qc->flags & ATA_QCFLAG_RESULT_TF)
+			ap->ops->tf_read(ap, &qc->result_tf);
+
+		__ata_qc_complete(qc);
+	} else {
+		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
+			return;
+
+		/* read result TF if failed or requested */
+		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
+			ap->ops->tf_read(ap, &qc->result_tf);
+
+		__ata_qc_complete(qc);
+	}
+}
+
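The new ata_qc_complete() takes only @qc; drivers signal failure through qc->err_mask.  A minimal completion-path sketch, not part of this patch (demo_complete_active_qc is hypothetical; ac_err_mask() and ata_qc_from_tag() are existing libata helpers):

static void demo_complete_active_qc(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	u8 status = ap->ops->check_status(ap);

	/* NULL means nothing is active or the qc has already failed */
	if (!qc)
		return;

	/* a non-zero err_mask marks the qc failed and routes it to EH */
	qc->err_mask |= ac_err_mask(status);
	ata_qc_complete(qc);
}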
 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
@@ -4503,7 +4629,7 @@
 #ifdef ATA_IRQ_TRAP
 	if ((ap->stats.idle_irq % 1000) == 0) {
 		ata_irq_ack(ap, 0); /* debug trap */
-		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
 		return 1;
 	}
 #endif
@@ -4556,32 +4682,168 @@
 	return IRQ_RETVAL(handled);
 }
 
+/**
+ *	sata_scr_valid - test whether SCRs are accessible
+ *	@ap: ATA port to test SCR accessibility for
+ *
+ *	Test whether SCRs are accessible for @ap.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if SCRs are accessible, 0 otherwise.
+ */
+int sata_scr_valid(struct ata_port *ap)
+{
+	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
+}
+
+/**
+ *	sata_scr_read - read SCR register of the specified port
+ *	@ap: ATA port to read SCR for
+ *	@reg: SCR to read
+ *	@val: Place to store read value
+ *
+ *	Read SCR register @reg of @ap into *@val.  This function is
+ *	guaranteed to succeed if the cable type of the port is SATA
+ *	and the port implements ->scr_read.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
+{
+	if (sata_scr_valid(ap)) {
+		*val = ap->ops->scr_read(ap, reg);
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+/**
+ *	sata_scr_write - write SCR register of the specified port
+ *	@ap: ATA port to write SCR for
+ *	@reg: SCR to write
+ *	@val: value to write
+ *
+ *	Write @val to SCR register @reg of @ap.  This function is
+ *	guaranteed to succeed if the cable type of the port is SATA
+ *	and the port implements ->scr_read.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_write(struct ata_port *ap, int reg, u32 val)
+{
+	if (sata_scr_valid(ap)) {
+		ap->ops->scr_write(ap, reg, val);
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+/**
+ *	sata_scr_write_flush - write SCR register of the specified port and flush
+ *	@ap: ATA port to write SCR for
+ *	@reg: SCR to write
+ *	@val: value to write
+ *
+ *	This function is identical to sata_scr_write() except that it
+ *	performs a flush after writing to the register.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
+{
+	if (sata_scr_valid(ap)) {
+		ap->ops->scr_write(ap, reg, val);
+		ap->ops->scr_read(ap, reg);
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+/**
+ *	ata_port_online - test whether the given port is online
+ *	@ap: ATA port to test
+ *
+ *	Test whether @ap is online.  Note that this function returns 0
+ *	if the online status of @ap cannot be obtained, so
+ *	ata_port_online(ap) != !ata_port_offline(ap).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if the port online status is available and the port is online,
+ *	0 otherwise.
+ */
+int ata_port_online(struct ata_port *ap)
+{
+	u32 sstatus;
+
+	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
+		return 1;
+	return 0;
+}
+
+/**
+ *	ata_port_offline - test whether the given port is offline
+ *	@ap: ATA port to test
+ *
+ *	Test whether @ap is offline.  Note that this function returns
+ *	0 if the offline status of @ap cannot be obtained, so
+ *	ata_port_online(ap) != !ata_port_offline(ap).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if the port offline status is available and the port is offline,
+ *	0 otherwise.
+ */
+int ata_port_offline(struct ata_port *ap)
+{
+	u32 sstatus;
+
+	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
+		return 1;
+	return 0;
+}
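A usage sketch for the new SCR helpers, not part of this patch (demo_limit_to_gen1 is a hypothetical helper; the standard SControl SPD field layout is assumed):

static int demo_limit_to_gen1(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	/* returns -EOPNOTSUPP if the port has no SCR access */
	rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4): 0x1 limits the link to Gen1 */
	scontrol = (scontrol & ~0xf0) | 0x10;

	/* write and read back so the new limit is posted */
	return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
}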
 
 /*
  * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
  * without filling any other registers
  */
-static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
-			     u8 cmd)
+static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
 {
 	struct ata_taskfile tf;
 	int err;
 
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 
 	tf.command = cmd;
 	tf.flags |= ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 
-	err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
 	if (err)
-		printk(KERN_ERR "%s: ata command failed: %d\n",
-				__FUNCTION__, err);
+		ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
+			       __FUNCTION__, err);
 
 	return err;
 }
 
-static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
+static int ata_flush_cache(struct ata_device *dev)
 {
 	u8 cmd;
 
@@ -4593,22 +4855,21 @@
 	else
 		cmd = ATA_CMD_FLUSH;
 
-	return ata_do_simple_cmd(ap, dev, cmd);
+	return ata_do_simple_cmd(dev, cmd);
 }
 
-static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
+static int ata_standby_drive(struct ata_device *dev)
 {
-	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
+	return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 }
 
-static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
+static int ata_start_drive(struct ata_device *dev)
 {
-	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
+	return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
 }
 
 /**
  *	ata_device_resume - wakeup a previously suspended devices
- *	@ap: port the device is connected to
  *	@dev: the device to resume
  *
  *	Kick the drive back into action, by sending it an idle immediate
@@ -4616,39 +4877,42 @@
  *	and host.
  *
  */
-int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
+int ata_device_resume(struct ata_device *dev)
 {
+	struct ata_port *ap = dev->ap;
+
 	if (ap->flags & ATA_FLAG_SUSPENDED) {
 		struct ata_device *failed_dev;
 		ap->flags &= ~ATA_FLAG_SUSPENDED;
 		while (ata_set_mode(ap, &failed_dev))
-			ata_dev_disable(ap, failed_dev);
+			ata_dev_disable(failed_dev);
 	}
 	if (!ata_dev_enabled(dev))
 		return 0;
 	if (dev->class == ATA_DEV_ATA)
-		ata_start_drive(ap, dev);
+		ata_start_drive(dev);
 
 	return 0;
 }
 
 /**
  *	ata_device_suspend - prepare a device for suspend
- *	@ap: port the device is connected to
  *	@dev: the device to suspend
  *
  *	Flush the cache on the drive, if appropriate, then issue a
  *	standbynow command.
  */
-int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
+int ata_device_suspend(struct ata_device *dev, pm_message_t state)
 {
+	struct ata_port *ap = dev->ap;
+
 	if (!ata_dev_enabled(dev))
 		return 0;
 	if (dev->class == ATA_DEV_ATA)
-		ata_flush_cache(ap, dev);
+		ata_flush_cache(dev);
 
 	if (state.event != PM_EVENT_FREEZE)
-		ata_standby_drive(ap, dev);
+		ata_standby_drive(dev);
 	ap->flags |= ATA_FLAG_SUSPENDED;
 	return 0;
 }
@@ -4776,7 +5040,6 @@
 	ap->udma_mask = ent->udma_mask;
 	ap->flags |= ent->host_flags;
 	ap->ops = ent->port_ops;
-	ap->cbl = ATA_CBL_NONE;
 	ap->sata_spd_limit = UINT_MAX;
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
@@ -4784,8 +5047,14 @@
 	INIT_WORK(&ap->port_task, NULL, NULL);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 
+	/* set cable type */
+	ap->cbl = ATA_CBL_NONE;
+	if (ap->flags & ATA_FLAG_SATA)
+		ap->cbl = ATA_CBL_SATA;
+
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *dev = &ap->device[i];
+		dev->ap = ap;
 		dev->devno = i;
 		dev->pio_mask = UINT_MAX;
 		dev->mwdma_mask = UINT_MAX;
@@ -4909,18 +5178,18 @@
 				(ap->pio_mask << ATA_SHIFT_PIO);
 
 		/* print per-port info to dmesg */
-		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
-				 "bmdma 0x%lX irq %lu\n",
-			ap->id,
-			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
-			ata_mode_string(xfer_mode_mask),
-	       		ap->ioaddr.cmd_addr,
-	       		ap->ioaddr.ctl_addr,
-	       		ap->ioaddr.bmdma_addr,
-	       		ent->irq);
+		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
+				"ctl 0x%lX bmdma 0x%lX irq %lu\n",
+				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
+				ata_mode_string(xfer_mode_mask),
+				ap->ioaddr.cmd_addr,
+				ap->ioaddr.ctl_addr,
+				ap->ioaddr.bmdma_addr,
+				ent->irq);
 
 		ata_chk_status(ap);
 		host_set->ops->irq_clear(ap);
+		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
 		count++;
 	}
 
@@ -4955,8 +5224,7 @@
 
 		rc = scsi_add_host(ap->host, dev);
 		if (rc) {
-			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
-			       ap->id);
+			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
 			/* FIXME: do something useful here */
 			/* FIXME: handle unconditional calls to
 			 * scsi_scan_host and ata_host_remove, below,
@@ -5052,14 +5320,11 @@
 int ata_scsi_release(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
-	int i;
 
 	DPRINTK("ENTER\n");
 
 	ap->ops->port_disable(ap);
 	ata_host_remove(ap, 0);
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		kfree(ap->device[i].id);
 
 	DPRINTK("EXIT\n");
 	return 1;
@@ -5277,7 +5542,7 @@
 EXPORT_SYMBOL_GPL(ata_host_set_remove);
 EXPORT_SYMBOL_GPL(ata_sg_init);
 EXPORT_SYMBOL_GPL(ata_sg_init_one);
-EXPORT_SYMBOL_GPL(__ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
 EXPORT_SYMBOL_GPL(ata_tf_load);
 EXPORT_SYMBOL_GPL(ata_tf_read);
@@ -5299,8 +5564,13 @@
 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
 EXPORT_SYMBOL_GPL(ata_bmdma_status);
 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
+EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
+EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
 EXPORT_SYMBOL_GPL(ata_port_probe);
-EXPORT_SYMBOL_GPL(ata_set_sata_spd);
+EXPORT_SYMBOL_GPL(sata_set_spd);
 EXPORT_SYMBOL_GPL(sata_phy_reset);
 EXPORT_SYMBOL_GPL(__sata_phy_reset);
 EXPORT_SYMBOL_GPL(ata_bus_reset);
@@ -5323,6 +5593,12 @@
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_release);
 EXPORT_SYMBOL_GPL(ata_host_intr);
+EXPORT_SYMBOL_GPL(sata_scr_valid);
+EXPORT_SYMBOL_GPL(sata_scr_read);
+EXPORT_SYMBOL_GPL(sata_scr_write);
+EXPORT_SYMBOL_GPL(sata_scr_write_flush);
+EXPORT_SYMBOL_GPL(ata_port_online);
+EXPORT_SYMBOL_GPL(ata_port_offline);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5348,7 +5624,12 @@
 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
 
-EXPORT_SYMBOL_GPL(ata_scsi_error);
 EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
+EXPORT_SYMBOL_GPL(ata_port_abort);
+EXPORT_SYMBOL_GPL(ata_port_freeze);
+EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
+EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 16db622..e401f35 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -44,6 +44,53 @@
 
 #include "libata.h"
 
+static void __ata_port_freeze(struct ata_port *ap);
+
+static void ata_ering_record(struct ata_ering *ering, int is_io,
+			     unsigned int err_mask)
+{
+	struct ata_ering_entry *ent;
+
+	WARN_ON(!err_mask);
+
+	ering->cursor++;
+	ering->cursor %= ATA_ERING_SIZE;
+
+	ent = &ering->ring[ering->cursor];
+	ent->is_io = is_io;
+	ent->err_mask = err_mask;
+	ent->timestamp = get_jiffies_64();
+}
+
+static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
+{
+	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
+	if (!ent->err_mask)
+		return NULL;
+	return ent;
+}
+
+static int ata_ering_map(struct ata_ering *ering,
+			 int (*map_fn)(struct ata_ering_entry *, void *),
+			 void *arg)
+{
+	int idx, rc = 0;
+	struct ata_ering_entry *ent;
+
+	idx = ering->cursor;
+	do {
+		ent = &ering->ring[idx];
+		if (!ent->err_mask)
+			break;
+		rc = map_fn(ent, arg);
+		if (rc)
+			break;
+		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
+	} while (idx != ering->cursor);
+
+	return rc;
+}
+
 /**
  *	ata_scsi_timed_out - SCSI layer time out callback
  *	@cmd: timed out SCSI command
@@ -55,6 +102,8 @@
  *	from finishing it by setting EH_SCHEDULED and return
  *	EH_NOT_HANDLED.
  *
+ *	TODO: kill this function once old EH is gone.
+ *
  *	LOCKING:
  *	Called from timer context
  *
@@ -67,10 +116,16 @@
 	struct ata_port *ap = ata_shost_to_port(host);
 	unsigned long flags;
 	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret = EH_HANDLED;
+	enum scsi_eh_timer_return ret;
 
 	DPRINTK("ENTER\n");
 
+	if (ap->ops->error_handler) {
+		ret = EH_NOT_HANDLED;
+		goto out;
+	}
+
+	ret = EH_HANDLED;
 	spin_lock_irqsave(&ap->host_set->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	if (qc) {
@@ -81,6 +136,7 @@
 	}
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
+ out:
 	DPRINTK("EXIT, ret=%d\n", ret);
 	return ret;
 }
@@ -100,21 +156,141 @@
 void ata_scsi_error(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
+	spinlock_t *hs_lock = &ap->host_set->lock;
+	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
+	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
-	/* synchronize with IRQ handler and port task */
-	spin_unlock_wait(&ap->host_set->lock);
+	/* synchronize with port task */
 	ata_port_flush_task(ap);
 
-	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+	/* synchronize with host_set lock and sort out timeouts */
 
-	ap->ops->eng_timeout(ap);
+	/* For new EH, all qcs are finished in one of three ways -
+	 * normal completion, error completion, and SCSI timeout.
+	 * Both completions can race against SCSI timeout.  When normal
+	 * completion wins, the qc never reaches EH.  When error
+	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
+	 *
+	 * When SCSI timeout wins, things are a bit more complex.
+	 * Normal or error completion can occur after the timeout but
+	 * before this point.  In such cases, both types of
+	 * completions are honored.  A scmd is determined to have
+	 * timed out iff its associated qc is active and not failed.
+	 */
+	if (ap->ops->error_handler) {
+		struct scsi_cmnd *scmd, *tmp;
+		int nr_timedout = 0;
 
+		spin_lock_irqsave(hs_lock, flags);
+
+		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
+			struct ata_queued_cmd *qc;
+
+			for (i = 0; i < ATA_MAX_QUEUE; i++) {
+				qc = __ata_qc_from_tag(ap, i);
+				if (qc->flags & ATA_QCFLAG_ACTIVE &&
+				    qc->scsicmd == scmd)
+					break;
+			}
+
+			if (i < ATA_MAX_QUEUE) {
+				/* the scmd has an associated qc */
+				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
+					/* which hasn't failed yet, timeout */
+					qc->err_mask |= AC_ERR_TIMEOUT;
+					qc->flags |= ATA_QCFLAG_FAILED;
+					nr_timedout++;
+				}
+			} else {
+				/* Normal completion occurred after
+				 * SCSI timeout but before this point.
+				 * Successfully complete it.
+				 */
+				scmd->retries = scmd->allowed;
+				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+			}
+		}
+
+		/* If there are timed out qcs, they belong to EH from
+		 * this point on but the state of the controller is
+		 * unknown.  Freeze the port to make sure the IRQ
+		 * handler doesn't diddle with those qcs.  This must
+		 * be done atomically w.r.t. setting QCFLAG_FAILED.
+		 */
+		if (nr_timedout)
+			__ata_port_freeze(ap);
+
+		spin_unlock_irqrestore(hs_lock, flags);
+	} else
+		spin_unlock_wait(hs_lock);
+
+ repeat:
+	/* invoke error handler */
+	if (ap->ops->error_handler) {
+		/* fetch & clear EH info */
+		spin_lock_irqsave(hs_lock, flags);
+
+		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
+		ap->eh_context.i = ap->eh_info;
+		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+
+		ap->flags &= ~ATA_FLAG_EH_PENDING;
+
+		spin_unlock_irqrestore(hs_lock, flags);
+
+		/* invoke EH */
+		ap->ops->error_handler(ap);
+
+		/* An exception might have happened after ->error_handler
+		 * recovered the port but before this point.  Repeat
+		 * EH in such a case.
+		 */
+		spin_lock_irqsave(hs_lock, flags);
+
+		if (ap->flags & ATA_FLAG_EH_PENDING) {
+			if (--repeat_cnt) {
+				ata_port_printk(ap, KERN_INFO,
+					"EH pending after completion, "
+					"repeating EH (cnt=%d)\n", repeat_cnt);
+				spin_unlock_irqrestore(hs_lock, flags);
+				goto repeat;
+			}
+			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
+					"tries, giving up\n", ATA_EH_MAX_REPEAT);
+		}
+
+		/* this run is complete, make sure EH info is clear */
+		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+
+		/* Clear host_eh_scheduled while holding hs_lock such
+		 * that if an exception occurs after this point but
+		 * before EH completion, the SCSI midlayer will
+		 * re-initiate EH.
+		 */
+		host->host_eh_scheduled = 0;
+
+		spin_unlock_irqrestore(hs_lock, flags);
+	} else {
+		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+		ap->ops->eng_timeout(ap);
+	}
+
+	/* finish or retry handled scmd's and clean up */
 	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
 
 	scsi_eh_flush_done_q(&ap->eh_done_q);
 
+	/* clean up */
+	spin_lock_irqsave(hs_lock, flags);
+
+	if (ap->flags & ATA_FLAG_RECOVERED)
+		ata_port_printk(ap, KERN_INFO, "EH complete\n");
+	ap->flags &= ~ATA_FLAG_RECOVERED;
+
+	spin_unlock_irqrestore(hs_lock, flags);
+
 	DPRINTK("EXIT\n");
 }
 
@@ -133,6 +309,8 @@
  *	an interrupt was not delivered to the driver, even though the
  *	transaction completed successfully.
  *
+ *	TODO: kill this function once old EH is gone.
+ *
  *	LOCKING:
  *	Inherited from SCSI layer (none, can sleep)
  */
@@ -167,8 +345,9 @@
 		/* ack bmdma irq events */
 		ap->ops->irq_clear(ap);
 
-		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
-		       ap->id, qc->tf.command, drv_stat, host_stat);
+		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
+			       "stat 0x%x host_stat 0x%x\n",
+			       qc->tf.command, drv_stat, host_stat);
 
 		/* complete taskfile transaction */
 		qc->err_mask |= AC_ERR_TIMEOUT;
@@ -197,6 +376,8 @@
  *	an interrupt was not delivered to the driver, even though the
  *	transaction completed successfully.
  *
+ *	TODO: kill this function once old EH is gone.
+ *
  *	LOCKING:
  *	Inherited from SCSI layer (none, can sleep)
  */
@@ -209,6 +390,190 @@
 	DPRINTK("EXIT\n");
 }
 
+/**
+ *	ata_qc_schedule_eh - schedule qc for error handling
+ *	@qc: command to schedule error handling for
+ *
+ *	Schedule error handling for @qc.  EH will kick in as soon as
+ *	other commands are drained.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	qc->flags |= ATA_QCFLAG_FAILED;
+	qc->ap->flags |= ATA_FLAG_EH_PENDING;
+
+	/* The following will fail if the timeout has already expired.
+	 * ata_scsi_error() takes care of such scmds on EH entry.
+	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
+	 * this function completes.
+	 */
+	scsi_req_abort_cmd(qc->scsicmd);
+}
+
+/**
+ *	ata_port_schedule_eh - schedule error handling without a qc
+ *	@ap: ATA port to schedule EH for
+ *
+ *	Schedule error handling for @ap.  EH will kick in as soon as
+ *	all commands are drained.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_port_schedule_eh(struct ata_port *ap)
+{
+	WARN_ON(!ap->ops->error_handler);
+
+	ap->flags |= ATA_FLAG_EH_PENDING;
+	ata_schedule_scsi_eh(ap->host);
+
+	DPRINTK("port EH scheduled\n");
+}
+
+/**
+ *	ata_port_abort - abort all qc's on the port
+ *	@ap: ATA port to abort qc's for
+ *
+ *	Abort all active qc's of @ap and schedule EH.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Number of aborted qc's.
+ */
+int ata_port_abort(struct ata_port *ap)
+{
+	int tag, nr_aborted = 0;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+
+		if (qc) {
+			qc->flags |= ATA_QCFLAG_FAILED;
+			ata_qc_complete(qc);
+			nr_aborted++;
+		}
+	}
+
+	if (!nr_aborted)
+		ata_port_schedule_eh(ap);
+
+	return nr_aborted;
+}
+
+/**
+ *	__ata_port_freeze - freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	This function is called when HSM violation or some other
+ *	condition disrupts normal operation of the port.  Frozen port
+ *	is not allowed to perform any operation until the port is
+ *	thawed, which usually follows a successful reset.
+ *
+ *	ap->ops->freeze() callback can be used for freezing the port
+ *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
+ *	port cannot be frozen hardware-wise, the interrupt handler
+ *	must ack and clear interrupts unconditionally while the port
+ *	is frozen.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+static void __ata_port_freeze(struct ata_port *ap)
+{
+	WARN_ON(!ap->ops->error_handler);
+
+	if (ap->ops->freeze)
+		ap->ops->freeze(ap);
+
+	ap->flags |= ATA_FLAG_FROZEN;
+
+	DPRINTK("ata%u port frozen\n", ap->id);
+}
+
+/**
+ *	ata_port_freeze - abort & freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	Abort and freeze @ap.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Number of aborted commands.
+ */
+int ata_port_freeze(struct ata_port *ap)
+{
+	int nr_aborted;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	nr_aborted = ata_port_abort(ap);
+	__ata_port_freeze(ap);
+
+	return nr_aborted;
+}
+
+/**
+ *	ata_eh_freeze_port - EH helper to freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	Freeze @ap.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_freeze_port(struct ata_port *ap)
+{
+	unsigned long flags;
+
+	if (!ap->ops->error_handler)
+		return;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	__ata_port_freeze(ap);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
+ *	ata_eh_thaw_port - EH helper to thaw port
+ *	@ap: ATA port to thaw
+ *
+ *	Thaw frozen port @ap.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_thaw_port(struct ata_port *ap)
+{
+	unsigned long flags;
+
+	if (!ap->ops->error_handler)
+		return;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+
+	ap->flags &= ~ATA_FLAG_FROZEN;
+
+	if (ap->ops->thaw)
+		ap->ops->thaw(ap);
+
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	DPRINTK("ata%u port thawed\n", ap->id);
+}
+
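A sketch of the interrupt-handler discipline described above, not part of this patch (demo_error_intr and DEMO_IRQ_HSM_ERR are made up): while the port is frozen only ack interrupts; otherwise escalate to ata_port_freeze() or ata_port_abort().

static void demo_error_intr(struct ata_port *ap, u32 irq_stat)
{
	if (ap->flags & ATA_FLAG_FROZEN) {
		/* EH owns the port; just ack and clear the interrupt */
		ap->ops->irq_clear(ap);
		return;
	}

	if (irq_stat & DEMO_IRQ_HSM_ERR)
		ata_port_freeze(ap);	/* abort active qcs, then freeze */
	else
		ata_port_abort(ap);	/* abort active qcs, schedule EH */
}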
 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
 {
 	/* nada */
@@ -261,3 +626,778 @@
 		scmd->retries--;
 	__ata_eh_qc_complete(qc);
 }
+
+/**
+ *	ata_eh_about_to_do - about to perform eh_action
+ *	@ap: target ATA port
+ *	@action: action about to be performed
+ *
+ *	Called just before performing EH actions to clear related bits
+ *	in @ap->eh_info so that EH actions are not unnecessarily
+ *	repeated.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->eh_info.action &= ~action;
+	ap->flags |= ATA_FLAG_RECOVERED;
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
+ *	ata_err_string - convert err_mask to descriptive string
+ *	@err_mask: error mask to convert to string
+ *
+ *	Convert @err_mask to descriptive string.  Errors are
+ *	prioritized according to severity and only the most severe
+ *	error is reported.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Descriptive string for @err_mask
+ */
+static const char *ata_err_string(unsigned int err_mask)
+{
+	if (err_mask & AC_ERR_HOST_BUS)
+		return "host bus error";
+	if (err_mask & AC_ERR_ATA_BUS)
+		return "ATA bus error";
+	if (err_mask & AC_ERR_TIMEOUT)
+		return "timeout";
+	if (err_mask & AC_ERR_HSM)
+		return "HSM violation";
+	if (err_mask & AC_ERR_SYSTEM)
+		return "internal error";
+	if (err_mask & AC_ERR_MEDIA)
+		return "media error";
+	if (err_mask & AC_ERR_INVALID)
+		return "invalid argument";
+	if (err_mask & AC_ERR_DEV)
+		return "device error";
+	return "unknown error";
+}
+
+/**
+ *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
+ *	@dev: device to perform REQUEST_SENSE to
+ *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+ *
+ *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
+ *	CONDITION.  This function is an EH helper.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure
+ */
+static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+					   unsigned char *sense_buf)
+{
+	struct ata_port *ap = dev->ap;
+	struct ata_taskfile tf;
+	u8 cdb[ATAPI_CDB_LEN];
+
+	DPRINTK("ATAPI request sense\n");
+
+	ata_tf_init(dev, &tf);
+
+	/* FIXME: is this needed? */
+	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+
+	/* XXX: why tf_read here? */
+	ap->ops->tf_read(ap, &tf);
+
+	/* fill these in, for the case where they are -not- overwritten */
+	sense_buf[0] = 0x70;
+	sense_buf[2] = tf.feature >> 4;
+
+	memset(cdb, 0, ATAPI_CDB_LEN);
+	cdb[0] = REQUEST_SENSE;
+	cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+
+	/* is it pointless to prefer PIO for "safety reasons"? */
+	if (ap->flags & ATA_FLAG_PIO_DMA) {
+		tf.protocol = ATA_PROT_ATAPI_DMA;
+		tf.feature |= ATAPI_PKT_DMA;
+	} else {
+		tf.protocol = ATA_PROT_ATAPI;
+		tf.lbam = (8 * 1024) & 0xff;
+		tf.lbah = (8 * 1024) >> 8;
+	}
+
+	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+				 sense_buf, SCSI_SENSE_BUFFERSIZE);
+}
+
+/**
+ *	ata_eh_analyze_serror - analyze SError for a failed port
+ *	@ap: ATA port to analyze SError for
+ *
+ *	Analyze SError if available and further determine the cause of
+ *	failure.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_analyze_serror(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	u32 serror = ehc->i.serror;
+	unsigned int err_mask = 0, action = 0;
+
+	if (serror & SERR_PERSISTENT) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_HARDRESET;
+	}
+	if (serror &
+	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+	}
+	if (serror & SERR_PROTOCOL) {
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+	}
+	if (serror & SERR_INTERNAL) {
+		err_mask |= AC_ERR_SYSTEM;
+		action |= ATA_EH_SOFTRESET;
+	}
+	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_HARDRESET;
+	}
+
+	ehc->i.err_mask |= err_mask;
+	ehc->i.action |= action;
+}
+
+/**
+ *	ata_eh_analyze_tf - analyze taskfile of a failed qc
+ *	@qc: qc to analyze
+ *	@tf: Taskfile registers to analyze
+ *
+ *	Analyze the taskfile of @qc and further determine the cause of
+ *	failure.  This function also requests ATAPI sense data if
+ *	available.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	Determined recovery action
+ */
+static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+				      const struct ata_taskfile *tf)
+{
+	unsigned int tmp, action = 0;
+	u8 stat = tf->command, err = tf->feature;
+
+	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
+		qc->err_mask |= AC_ERR_HSM;
+		return ATA_EH_SOFTRESET;
+	}
+
+	if (!(qc->err_mask & AC_ERR_DEV))
+		return 0;
+
+	switch (qc->dev->class) {
+	case ATA_DEV_ATA:
+		if (err & ATA_ICRC)
+			qc->err_mask |= AC_ERR_ATA_BUS;
+		if (err & ATA_UNC)
+			qc->err_mask |= AC_ERR_MEDIA;
+		if (err & ATA_IDNF)
+			qc->err_mask |= AC_ERR_INVALID;
+		break;
+
+	case ATA_DEV_ATAPI:
+		tmp = atapi_eh_request_sense(qc->dev,
+					     qc->scsicmd->sense_buffer);
+		if (!tmp) {
+			/* ATA_QCFLAG_SENSE_VALID is used to tell
+			 * atapi_qc_complete() that sense data is
+			 * already valid.
+			 *
+			 * TODO: interpret sense data and set
+			 * appropriate err_mask.
+			 */
+			qc->flags |= ATA_QCFLAG_SENSE_VALID;
+		} else
+			qc->err_mask |= tmp;
+	}
+
+	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
+		action |= ATA_EH_SOFTRESET;
+
+	return action;
+}
+
+static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
+{
+	if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
+		return 1;
+
+	if (ent->is_io) {
+		if (ent->err_mask & AC_ERR_HSM)
+			return 1;
+		if ((ent->err_mask &
+		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+			return 2;
+	}
+
+	return 0;
+}
+
+struct speed_down_needed_arg {
+	u64 since;
+	int nr_errors[3];
+};
+
+static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	struct speed_down_needed_arg *arg = void_arg;
+
+	if (ent->timestamp < arg->since)
+		return -1;
+
+	arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
+	return 0;
+}
+
+/**
+ *	ata_eh_speed_down_needed - Determine whether speed down is necessary
+ *	@dev: Device of interest
+ *
+ *	This function examines the error ring of @dev and determines
+ *	whether a speed down is necessary.  A speed down is necessary
+ *	if there have been more than 3 Cat-1 errors or 10 Cat-2 errors
+ *	during the last 15 minutes.
+ *
+ *	Cat-1 errors are ATA_BUS and TIMEOUT errors for any command,
+ *	and HSM violations for known supported commands.
+ *
+ *	Cat-2 errors are unclassified DEV errors for known supported
+ *	commands.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if speed down is necessary, 0 otherwise
+ */
+static int ata_eh_speed_down_needed(struct ata_device *dev)
+{
+	const u64 interval = 15LLU * 60 * HZ;
+	static const int err_limits[3] = { -1, 3, 10 };
+	struct speed_down_needed_arg arg;
+	struct ata_ering_entry *ent;
+	int err_cat;
+	u64 j64;
+
+	ent = ata_ering_top(&dev->ering);
+	if (!ent)
+		return 0;
+
+	err_cat = ata_eh_categorize_ering_entry(ent);
+	if (err_cat == 0)
+		return 0;
+
+	memset(&arg, 0, sizeof(arg));
+
+	j64 = get_jiffies_64();
+	if (j64 >= interval)
+		arg.since = j64 - interval;
+	else
+		arg.since = 0;
+
+	ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
+
+	return arg.nr_errors[err_cat] > err_limits[err_cat];
+}
+
+/**
+ *	ata_eh_speed_down - record error and speed down if necessary
+ *	@dev: Failed device
+ *	@is_io: Did the device fail during normal IO?
+ *	@err_mask: err_mask of the error
+ *
+ *	Record error and examine error history to determine whether
+ *	adjusting transmission speed is necessary.  It also sets
+ *	transmission limits appropriately if such adjustment is
+ *	necessary.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	Recommended ATA_EH_* action mask to be OR'd into the recovery
+ *	action, 0 if none is needed.
+ */
+static int ata_eh_speed_down(struct ata_device *dev, int is_io,
+			     unsigned int err_mask)
+{
+	if (!err_mask)
+		return 0;
+
+	/* record error and determine whether speed down is necessary */
+	ata_ering_record(&dev->ering, is_io, err_mask);
+
+	if (!ata_eh_speed_down_needed(dev))
+		return 0;
+
+	/* speed down SATA link speed if possible */
+	if (sata_down_spd_limit(dev->ap) == 0)
+		return ATA_EH_HARDRESET;
+
+	/* lower transfer mode */
+	if (ata_down_xfermask_limit(dev, 0) == 0)
+		return ATA_EH_SOFTRESET;
+
+	ata_dev_printk(dev, KERN_ERR,
+		       "speed down requested but no transfer mode left\n");
+	return 0;
+}
+
+/**
+ *	ata_eh_autopsy - analyze error and determine recovery action
+ *	@ap: ATA port to perform autopsy on
+ *
+ *	Analyze why @ap failed and determine which recovery action is
+ *	needed.  This function also sets more detailed AC_ERR_* values
+ *	and fills sense data for ATAPI CHECK CONDITION.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_autopsy(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	unsigned int action = ehc->i.action;
+	struct ata_device *failed_dev = NULL;
+	unsigned int all_err_mask = 0;
+	int tag, is_io = 0;
+	u32 serror;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* obtain and analyze SError */
+	rc = sata_scr_read(ap, SCR_ERROR, &serror);
+	if (rc == 0) {
+		ehc->i.serror |= serror;
+		ata_eh_analyze_serror(ap);
+	} else if (rc != -EOPNOTSUPP)
+		action |= ATA_EH_HARDRESET;
+
+	/* any real error trumps AC_ERR_OTHER */
+	if (ehc->i.err_mask & ~AC_ERR_OTHER)
+		ehc->i.err_mask &= ~AC_ERR_OTHER;
+
+	all_err_mask |= ehc->i.err_mask;
+
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+		if (!(qc->flags & ATA_QCFLAG_FAILED))
+			continue;
+
+		/* inherit upper level err_mask */
+		qc->err_mask |= ehc->i.err_mask;
+
+		if (qc->err_mask & AC_ERR_TIMEOUT)
+			action |= ATA_EH_SOFTRESET;
+
+		/* analyze TF */
+		action |= ata_eh_analyze_tf(qc, &qc->result_tf);
+
+		/* DEV errors are probably spurious in case of ATA_BUS error */
+		if (qc->err_mask & AC_ERR_ATA_BUS)
+			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
+					  AC_ERR_INVALID);
+
+		/* any real error trumps unknown error */
+		if (qc->err_mask & ~AC_ERR_OTHER)
+			qc->err_mask &= ~AC_ERR_OTHER;
+
+		/* SENSE_VALID trumps dev/unknown error and revalidation */
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
+			action &= ~ATA_EH_REVALIDATE;
+		}
+
+		/* accumulate error info */
+		failed_dev = qc->dev;
+		all_err_mask |= qc->err_mask;
+		if (qc->flags & ATA_QCFLAG_IO)
+			is_io = 1;
+	}
+
+	/* speed down iff command was in progress */
+	if (failed_dev)
+		action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
+
+	if (all_err_mask)
+		action |= ATA_EH_REVALIDATE;
+
+	ehc->i.dev = failed_dev;
+	ehc->i.action = action;
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eh_report - report error handling to user
+ *	@ap: ATA port on which EH is in progress
+ *
+ *	Report EH to user.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_report(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	const char *frozen, *desc;
+	int tag, nr_failed = 0;
+
+	desc = NULL;
+	if (ehc->i.desc[0] != '\0')
+		desc = ehc->i.desc;
+
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+		if (!(qc->flags & ATA_QCFLAG_FAILED))
+			continue;
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
+			continue;
+
+		nr_failed++;
+	}
+
+	if (!nr_failed && !ehc->i.err_mask)
+		return;
+
+	frozen = "";
+	if (ap->flags & ATA_FLAG_FROZEN)
+		frozen = " frozen";
+
+	if (ehc->i.dev) {
+		ata_dev_printk(ehc->i.dev, KERN_ERR,
+			       "exception Emask 0x%x SErr 0x%x action 0x%x%s\n",
+			       ehc->i.err_mask, ehc->i.serror, ehc->i.action,
+			       frozen);
+		if (desc)
+			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
+	} else {
+		ata_port_printk(ap, KERN_ERR,
+				"exception Emask 0x%x SErr 0x%x action 0x%x%s\n",
+				ehc->i.err_mask, ehc->i.serror, ehc->i.action,
+				frozen);
+		if (desc)
+			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
+	}
+
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
+			continue;
+
+		ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
+			       "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
+			       qc->tag, qc->tf.command, qc->err_mask,
+			       qc->result_tf.command, qc->result_tf.feature,
+			       ata_err_string(qc->err_mask));
+	}
+}
+
+static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset,
+			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	unsigned int classes[ATA_MAX_DEVICES];
+	int tries = ATA_EH_RESET_TRIES;
+	ata_reset_fn_t reset;
+	int rc;
+
+	if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
+					 !(ehc->i.action & ATA_EH_HARDRESET))))
+		reset = softreset;
+	else
+		reset = hardreset;
+
+ retry:
+	ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
+			reset == softreset ? "soft" : "hard");
+
+	/* reset */
+	ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
+	ehc->i.flags |= ATA_EHI_DID_RESET;
+
+	rc = ata_do_reset(ap, reset, classes);
+
+	if (rc && --tries) {
+		ata_port_printk(ap, KERN_WARNING,
+				"%sreset failed, retrying in 5 secs\n",
+				reset == softreset ? "soft" : "hard");
+		ssleep(5);
+
+		if (reset == hardreset)
+			sata_down_spd_limit(ap);
+		if (hardreset)
+			reset = hardreset;
+		goto retry;
+	}
+
+	if (rc == 0) {
+		if (postreset)
+			postreset(ap, classes);
+
+		/* reset successful, schedule revalidation */
+		ehc->i.dev = NULL;
+		ehc->i.action &= ~ATA_EH_RESET_MASK;
+		ehc->i.action |= ATA_EH_REVALIDATE;
+	}
+
+	return rc;
+}
+
+static int ata_eh_revalidate(struct ata_port *ap,
+			     struct ata_device **r_failed_dev)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_device *dev;
+	int i, rc = 0;
+
+	DPRINTK("ENTER\n");
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+
+		if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) &&
+		    (!ehc->i.dev || ehc->i.dev == dev)) {
+			if (ata_port_offline(ap)) {
+				rc = -EIO;
+				break;
+			}
+
+			ata_eh_about_to_do(ap, ATA_EH_REVALIDATE);
+			rc = ata_dev_revalidate(dev,
+					ehc->i.flags & ATA_EHI_DID_RESET);
+			if (rc)
+				break;
+
+			ehc->i.action &= ~ATA_EH_REVALIDATE;
+		}
+	}
+
+	if (rc)
+		*r_failed_dev = dev;
+
+	DPRINTK("EXIT\n");
+	return rc;
+}
+
+static int ata_port_nr_enabled(struct ata_port *ap)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		if (ata_dev_enabled(&ap->device[i]))
+			cnt++;
+	return cnt;
+}
+
+/**
+ *	ata_eh_recover - recover host port after error
+ *	@ap: host port to recover
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	This is the alpha and omega, eum and yang, heart and soul of
+ *	libata exception handling.  On entry, actions required to
+ *	recover each devices are recorded in eh_context.  This
+ *	function executes all the operations with appropriate retrials
+ *	and fallbacks to resurrect failed devices.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset,
+			  ata_reset_fn_t hardreset,
+			  ata_postreset_fn_t postreset)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_device *dev;
+	int down_xfermask, i, rc;
+
+	DPRINTK("ENTER\n");
+
+	/* prep for recovery */
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+
+		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+	}
+
+ retry:
+	down_xfermask = 0;
+	rc = 0;
+
+	/* skip EH if possible. */
+	if (!ata_port_nr_enabled(ap) && !(ap->flags & ATA_FLAG_FROZEN))
+		ehc->i.action = 0;
+
+	/* reset */
+	if (ehc->i.action & ATA_EH_RESET_MASK) {
+		ata_eh_freeze_port(ap);
+
+		rc = ata_eh_reset(ap, softreset, hardreset, postreset);
+		if (rc) {
+			ata_port_printk(ap, KERN_ERR,
+					"reset failed, giving up\n");
+			goto out;
+		}
+
+		ata_eh_thaw_port(ap);
+	}
+
+	/* revalidate existing devices */
+	rc = ata_eh_revalidate(ap, &dev);
+	if (rc)
+		goto dev_fail;
+
+	/* configure transfer mode if the port has been reset */
+	if (ehc->i.flags & ATA_EHI_DID_RESET) {
+		rc = ata_set_mode(ap, &dev);
+		if (rc) {
+			down_xfermask = 1;
+			goto dev_fail;
+		}
+	}
+
+	goto out;
+
+ dev_fail:
+	switch (rc) {
+	case -ENODEV:
+	case -EINVAL:
+		ehc->tries[dev->devno] = 0;
+		break;
+	case -EIO:
+		sata_down_spd_limit(ap);
+	default:
+		ehc->tries[dev->devno]--;
+		if (down_xfermask &&
+		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
+			ehc->tries[dev->devno] = 0;
+	}
+
+	/* disable device if it has used up all its chances */
+	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno])
+		ata_dev_disable(dev);
+
+	/* soft didn't work?  be haaaaard */
+	if (ehc->i.flags & ATA_EHI_DID_RESET)
+		ehc->i.action |= ATA_EH_HARDRESET;
+	else
+		ehc->i.action |= ATA_EH_SOFTRESET;
+
+	if (ata_port_nr_enabled(ap)) {
+		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
+				"devices, retrying in 5 secs\n");
+		ssleep(5);
+	} else {
+		/* no device left, repeat fast */
+		msleep(500);
+	}
+
+	goto retry;
+
+ out:
+	if (rc) {
+		for (i = 0; i < ATA_MAX_DEVICES; i++)
+			ata_dev_disable(&ap->device[i]);
+	}
+
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	ata_eh_finish - finish up EH
+ *	@ap: host port to finish EH for
+ *
+ *	Recovery is complete.  Clean up EH states and retry or finish
+ *	failed qcs.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_finish(struct ata_port *ap)
+{
+	int tag;
+
+	/* retry or finish qcs */
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+		if (!(qc->flags & ATA_QCFLAG_FAILED))
+			continue;
+
+		if (qc->err_mask) {
+			/* FIXME: Once EH migration is complete,
+			 * generate sense data in this function,
+			 * considering both err_mask and tf.
+			 */
+			if (qc->err_mask & AC_ERR_INVALID)
+				ata_eh_qc_complete(qc);
+			else
+				ata_eh_qc_retry(qc);
+		} else {
+			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+				ata_eh_qc_complete(qc);
+			} else {
+				/* feed zero TF to sense generation */
+				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
+				ata_eh_qc_retry(qc);
+			}
+		}
+	}
+}
+
+/**
+ *	ata_do_eh - do standard error handling
+ *	@ap: host port to handle error for
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Perform standard error handling sequence.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
+	       ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	ata_eh_autopsy(ap);
+	ata_eh_report(ap);
+	ata_eh_recover(ap, softreset, hardreset, postreset);
+	ata_eh_finish(ap);
+}
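For a driver converting from ->eng_timeout, the ->error_handler callback usually just feeds its reset methods into ata_do_eh(); a hypothetical sketch (demo_softreset, demo_hardreset and demo_postreset stand in for the driver's own methods, any of which may be NULL):

static void demo_error_handler(struct ata_port *ap)
{
	/* autopsy, report, recover (reset/revalidate), then finish qcs */
	ata_do_eh(ap, demo_softreset, demo_hardreset, demo_postreset);
}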
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 9871f82..e61cc35 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -302,7 +302,6 @@
 
 /**
  *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
- *	@ap: ATA port to which the new command is attached
  *	@dev: ATA device to which the new command is attached
  *	@cmd: SCSI command that originated this ATA command
  *	@done: SCSI command completion function
@@ -321,14 +320,13 @@
  *	RETURNS:
  *	Command allocated, or %NULL if none available.
  */
-struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
-				       struct ata_device *dev,
+struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 				       struct scsi_cmnd *cmd,
 				       void (*done)(struct scsi_cmnd *))
 {
 	struct ata_queued_cmd *qc;
 
-	qc = ata_qc_new_init(ap, dev);
+	qc = ata_qc_new_init(dev);
 	if (qc) {
 		qc->scsicmd = cmd;
 		qc->scsidone = done;
@@ -398,7 +396,7 @@
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev = &ap->device[sdev->id];
 
-	return ata_device_resume(ap, dev);
+	return ata_device_resume(dev);
 }
 
 int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
@@ -406,7 +404,7 @@
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev = &ap->device[sdev->id];
 
-	return ata_device_suspend(ap, dev, state);
+	return ata_device_suspend(dev, state);
 }
 
 /**
@@ -417,6 +415,7 @@
  *	@sk: the sense key we'll fill out
  *	@asc: the additional sense code we'll fill out
  *	@ascq: the additional sense code qualifier we'll fill out
+ *	@verbose: be verbose
  *
  *	Converts an ATA error into a SCSI error.  Fill out pointers to
  *	SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -426,7 +425,7 @@
  *	spin_lock_irqsave(host_set lock)
  */
 void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
-			u8 *ascq)
+			u8 *ascq, int verbose)
 {
 	int i;
 
@@ -491,8 +490,9 @@
 			}
 		}
 		/* No immediate match */
-		printk(KERN_WARNING "ata%u: no sense translation for "
-		       "error 0x%02x\n", id, drv_err);
+		if (verbose)
+			printk(KERN_WARNING "ata%u: no sense translation for "
+			       "error 0x%02x\n", id, drv_err);
 	}
 
 	/* Fall back to interpreting status bits */
@@ -505,8 +505,9 @@
 		}
 	}
 	/* No error?  Undecoded? */
-	printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n",
-	       id, drv_stat);
+	if (verbose)
+		printk(KERN_WARNING "ata%u: no sense translation for "
+		       "status: 0x%02x\n", id, drv_stat);
 
 	/* We need a sensible error return here, which is tricky, and one
 	   that won't cause people to do things like return a disk wrongly */
@@ -515,9 +516,10 @@
 	*ascq = 0x00;
 
  translate_done:
-	printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to "
-	       "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err,
-	       *sk, *asc, *ascq);
+	if (verbose)
+		printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
+		       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
+		       id, drv_stat, drv_err, *sk, *asc, *ascq);
 	return;
 }
 
@@ -537,9 +539,10 @@
 void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
-	struct ata_taskfile *tf = &qc->tf;
+	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
 	unsigned char *desc = sb + 8;
+	int verbose = qc->ap->ops->error_handler == NULL;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
@@ -552,7 +555,7 @@
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[1], &sb[2], &sb[3]);
+				   &sb[1], &sb[2], &sb[3], verbose);
 		sb[1] &= 0x0f;
 	}
 
@@ -608,8 +611,9 @@
 void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
-	struct ata_taskfile *tf = &qc->tf;
+	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
+	int verbose = qc->ap->ops->error_handler == NULL;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
@@ -622,7 +626,7 @@
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[2], &sb[12], &sb[13]);
+				   &sb[2], &sb[12], &sb[13], verbose);
 		sb[2] &= 0x0f;
 	}
 
@@ -748,7 +752,7 @@
 		tf->nsect = 1;	/* 1 sector, lba=0 */
 
 		if (qc->dev->flags & ATA_DFLAG_LBA) {
-			qc->tf.flags |= ATA_TFLAG_LBA;
+			tf->flags |= ATA_TFLAG_LBA;
 
 			tf->lbah = 0x0;
 			tf->lbam = 0x0;
@@ -1199,14 +1203,11 @@
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
  	    ((cdb[2] & 0x20) || need_sense)) {
-		qc->ap->ops->tf_read(qc->ap, &qc->tf);
  		ata_gen_ata_desc_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
 		} else {
-			qc->ap->ops->tf_read(qc->ap, &qc->tf);
-
 			/* TODO: decide which descriptor format to use
 			 * for 48b LBA devices and call that here
 			 * instead of the fixed desc, which is only
@@ -1217,10 +1218,8 @@
 		}
 	}
 
-	if (need_sense) {
-		/* The ata_gen_..._sense routines fill in tf */
-		ata_dump_status(qc->ap->id, &qc->tf);
-	}
+	if (need_sense && !qc->ap->ops->error_handler)
+		ata_dump_status(qc->ap->id, &qc->result_tf);
 
 	qc->scsidone(cmd);
 
@@ -1229,7 +1228,6 @@
 
 /**
  *	ata_scsi_translate - Translate then issue SCSI command to ATA device
- *	@ap: ATA port to which the command is addressed
  *	@dev: ATA device to which the command is addressed
  *	@cmd: SCSI command to execute
  *	@done: SCSI command completion function
@@ -1252,17 +1250,16 @@
  *	spin_lock_irqsave(host_set lock)
  */
 
-static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
-			      struct scsi_cmnd *cmd,
-			      void (*done)(struct scsi_cmnd *),
-			      ata_xlat_func_t xlat_func)
+static void ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+			       void (*done)(struct scsi_cmnd *),
+			       ata_xlat_func_t xlat_func)
 {
 	struct ata_queued_cmd *qc;
 	u8 *scsicmd = cmd->cmnd;
 
 	VPRINTK("ENTER\n");
 
-	qc = ata_scsi_qc_new(ap, dev, cmd, done);
+	qc = ata_scsi_qc_new(dev, cmd, done);
 	if (!qc)
 		goto err_mem;
 
@@ -1270,8 +1267,8 @@
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	    cmd->sc_data_direction == DMA_TO_DEVICE) {
 		if (unlikely(cmd->request_bufflen < 1)) {
-			printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
-			       ap->id, dev->devno);
+			ata_dev_printk(dev, KERN_WARNING,
+				       "WARNING: zero len r/w req\n");
 			goto err_did;
 		}
 
@@ -2004,7 +2001,6 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
 	}
 
@@ -2070,6 +2066,26 @@
 
 	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
 
+	/* handle completion from new EH */
+	if (unlikely(qc->ap->ops->error_handler &&
+		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+
+		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+			/* FIXME: not quite right; we don't want the
+			 * translation of taskfile registers into a
+			 * sense descriptors, since that's only
+			 * correct for ATA, not ATAPI
+			 */
+			ata_gen_ata_desc_sense(qc);
+		}
+
+		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
+		qc->scsidone(cmd);
+		ata_qc_free(qc);
+		return;
+	}
+
+	/* successful completion or old EH failure path */
 	if (unlikely(err_mask & AC_ERR_DEV)) {
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 		atapi_request_sense(qc);
@@ -2080,7 +2096,6 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
 	} else {
 		u8 *scsicmd = cmd->cmnd;
@@ -2211,8 +2226,9 @@
 
 	if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
 		if (unlikely(dev->class == ATA_DEV_ATAPI)) {
-			printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n",
-			       ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled");
+			ata_dev_printk(dev, KERN_WARNING,
+				"WARNING: ATAPI is %s, device ignored.\n",
+				atapi_enabled ? "not supported with this driver" : "disabled");
 			return NULL;
 		}
 	}
@@ -2361,6 +2377,9 @@
 	 */
 	qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
 
+	/* request result TF */
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
+
 	return 0;
 
  invalid_fld:
@@ -2437,19 +2456,20 @@
 #endif
 }
 
-static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
-				       struct ata_port *ap, struct ata_device *dev)
+static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
+				       void (*done)(struct scsi_cmnd *),
+				       struct ata_device *dev)
 {
 	if (dev->class == ATA_DEV_ATA) {
 		ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
 							      cmd->cmnd[0]);
 
 		if (xlat_func)
-			ata_scsi_translate(ap, dev, cmd, done, xlat_func);
+			ata_scsi_translate(dev, cmd, done, xlat_func);
 		else
-			ata_scsi_simulate(ap, dev, cmd, done);
+			ata_scsi_simulate(dev, cmd, done);
 	} else
-		ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
+		ata_scsi_translate(dev, cmd, done, atapi_xlat);
 }
 
 /**
@@ -2487,7 +2507,7 @@
 
 	dev = ata_scsi_find_dev(ap, scsidev);
 	if (likely(dev))
-		__ata_scsi_queuecmd(cmd, done, ap, dev);
+		__ata_scsi_queuecmd(cmd, done, dev);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
 		done(cmd);
@@ -2500,7 +2520,6 @@
 
 /**
  *	ata_scsi_simulate - simulate SCSI command on ATA device
- *	@ap: port the device is connected to
  *	@dev: the target device
  *	@cmd: SCSI command being sent to device.
  *	@done: SCSI command completion function.
@@ -2512,14 +2531,12 @@
  *	spin_lock_irqsave(host_set lock)
  */
 
-void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
-		      struct scsi_cmnd *cmd,
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 		      void (*done)(struct scsi_cmnd *))
 {
 	struct ata_scsi_args args;
 	const u8 *scsicmd = cmd->cmnd;
 
-	args.ap = ap;
 	args.dev = dev;
 	args.id = dev->id;
 	args.cmd = cmd;
@@ -2605,3 +2622,26 @@
 	}
 }
 
+/**
+ *	ata_schedule_scsi_eh - schedule EH for SCSI host
+ *	@shost:	SCSI host to invoke error handling on.
+ *
+ *	Schedule SCSI EH without scmd.  This is a hack.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_schedule_scsi_eh(struct Scsi_Host *shost)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+
+	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
+	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
+		shost->host_eh_scheduled++;
+		scsi_eh_wakeup(shost);
+	}
+
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 3f8b0a8..b76ad7d 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,7 +32,6 @@
 #define DRV_VERSION	"1.30"	/* must be exactly four chars */
 
 struct ata_scsi_args {
-	struct ata_port		*ap;
 	struct ata_device	*dev;
 	u16			*id;
 	struct scsi_cmnd	*cmd;
@@ -43,23 +42,22 @@
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
-extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
-				      struct ata_device *dev);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
-extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev);
+extern void ata_dev_disable(struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
-extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen);
-extern int ata_down_sata_spd_limit(struct ata_port *ap);
-extern int ata_set_sata_spd_needed(struct ata_port *ap);
-extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
-				   int force_pio0);
+extern int sata_down_spd_limit(struct ata_port *ap);
+extern int sata_set_spd_needed(struct ata_port *ap);
+extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
 extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
 extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
-			ata_postreset_fn_t postreset, unsigned int *classes);
+			unsigned int *classes);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
+extern void __ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
 extern void ata_dev_select(struct ata_port *ap, unsigned int device,
                            unsigned int wait, unsigned int can_sleep);
@@ -100,9 +98,11 @@
 extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
+extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
 
 /* libata-eh.c */
 extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
+extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 
 #endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 670ef16..e6d141d 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -680,7 +680,7 @@
 	}
 
 	if (EDMA_EN & reg) {
-		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
+		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
 		/* FIXME: Consider doing a reset here to recover */
 	}
 }
@@ -1309,8 +1309,8 @@
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	if (EDMA_ERR_SERR & edma_err_cause) {
-		serr = scr_read(ap, SCR_ERROR);
-		scr_write_flush(ap, SCR_ERROR, serr);
+		sata_scr_read(ap, SCR_ERROR, &serr);
+		sata_scr_write_flush(ap, SCR_ERROR, serr);
 	}
 	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
 		struct mv_port_priv *pp	= ap->private_data;
@@ -1934,15 +1934,16 @@
 
 	/* Issue COMRESET via SControl */
 comreset_retry:
-	scr_write_flush(ap, SCR_CONTROL, 0x301);
+	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
 	__msleep(1, can_sleep);
 
-	scr_write_flush(ap, SCR_CONTROL, 0x300);
+	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 	__msleep(20, can_sleep);
 
 	timeout = jiffies + msecs_to_jiffies(200);
 	do {
-		sstatus = scr_read(ap, SCR_STATUS) & 0x3;
+		sata_scr_read(ap, SCR_STATUS, &sstatus);
+		sstatus &= 0x3;
 		if ((sstatus == 3) || (sstatus == 0))
 			break;
 
@@ -1959,11 +1960,12 @@
 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
 
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		ata_port_probe(ap);
 	} else {
-		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
-		       ap->id, scr_read(ap, SCR_STATUS));
+		sata_scr_read(ap, SCR_STATUS, &sstatus);
+		ata_port_printk(ap, KERN_INFO,
+				"no device found (phy stat %08x)\n", sstatus);
 		ata_port_disable(ap);
 		return;
 	}
@@ -2021,7 +2023,7 @@
 {
 	struct ata_queued_cmd *qc;
 
-	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
+	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
 	DPRINTK("All regs @ start of eng_timeout\n");
 	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
 			 to_pci_dev(ap->host_set->dev));
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index aaf896a..bb00043 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -436,7 +436,7 @@
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
 	case ATA_PROT_NODATA:
-		printk(KERN_ERR "ata%u: command timeout\n", ap->id);
+		ata_port_printk(ap, KERN_ERR, "command timeout\n");
 		drv_stat = ata_wait_idle(ap);
 		qc->err_mask |= __ac_err_mask(drv_stat);
 		break;
@@ -444,8 +444,9 @@
 	default:
 		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
-		printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
-		       ap->id, qc->tf.command, drv_stat);
+		ata_port_printk(ap, KERN_ERR,
+				"unknown timeout, cmd 0x%x stat 0x%x\n",
+				qc->tf.command, drv_stat);
 
 		qc->err_mask |= ac_err_mask(drv_stat);
 		break;
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index c933357..aa63044 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -96,6 +96,8 @@
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void sil_post_set_mode (struct ata_port *ap);
+static void sil_freeze(struct ata_port *ap);
+static void sil_thaw(struct ata_port *ap);
 
 
 static const struct pci_device_id sil_pci_tbl[] = {
@@ -174,7 +176,10 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= sil_freeze,
+	.thaw			= sil_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.scr_read		= sil_scr_read,
@@ -314,6 +319,33 @@
 		writel(val, mmio);
 }
 
+static void sil_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host_set->mmio_base;
+	u32 tmp;
+
+	/* plug IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
+	writel(tmp, mmio_base + SIL_SYSCFG);
+	readl(mmio_base + SIL_SYSCFG);	/* flush */
+}
+
+static void sil_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host_set->mmio_base;
+	u32 tmp;
+
+	/* clear IRQ */
+	ata_chk_status(ap);
+	ata_bmdma_irq_clear(ap);
+
+	/* turn on IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
+	writel(tmp, mmio_base + SIL_SYSCFG);
+}
+
 /**
  *	sil_dev_config - Apply device/host-specific errata fixups
  *	@ap: Port containing device to be examined
@@ -360,16 +392,16 @@
 	if (slow_down ||
 	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
 	     (quirks & SIL_QUIRK_MOD15WRITE))) {
-		printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
-		       ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
+			       "(mod15write workaround)\n");
 		dev->max_sectors = 15;
 		return;
 	}
 
 	/* limit to udma5 */
 	if (quirks & SIL_QUIRK_UDMA5MAX) {
-		printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
-		       ap->id, dev->devno, model_num);
+		ata_dev_printk(dev, KERN_INFO,
+			       "applying Maxtor errata fix %s\n", model_num);
 		dev->udma_mask &= ATA_UDMA5;
 		return;
 	}
@@ -384,7 +416,7 @@
 	int rc;
 	unsigned int i;
 	int pci_dev_busy = 0;
-	u32 tmp, irq_mask;
+	u32 tmp;
 	u8 cls;
 
 	if (!printed_version++)
@@ -474,24 +506,11 @@
 	}
 
 	if (ent->driver_data == sil_3114) {
-		irq_mask = SIL_MASK_4PORT;
-
 		/* flip the magic "make 4 ports work" bit */
 		tmp = readl(mmio_base + sil_port[2].bmdma);
 		if ((tmp & SIL_INTR_STEERING) == 0)
 			writel(tmp | SIL_INTR_STEERING,
 			       mmio_base + sil_port[2].bmdma);
-
-	} else {
-		irq_mask = SIL_MASK_2PORT;
-	}
-
-	/* make sure IDE0/1/2/3 interrupts are not masked */
-	tmp = readl(mmio_base + SIL_SYSCFG);
-	if (tmp & irq_mask) {
-		tmp &= ~irq_mask;
-		writel(tmp, mmio_base + SIL_SYSCFG);
-		readl(mmio_base + SIL_SYSCFG);	/* flush */
 	}
 
 	/* mask all SATA phy-related interrupts */
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index e9fd869..6e7728c 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -156,6 +156,9 @@
 	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
 	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */
 
+	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
+				  PORT_IRQ_DEV_XCHG | PORT_IRQ_UNK_FIS,
+
 	/* bits[27:16] are unmasked (raw) */
 	PORT_IRQ_RAW_SHIFT	= 16,
 	PORT_IRQ_MASKED_MASK	= 0x7ff,
@@ -242,6 +245,58 @@
 	struct sil24_atapi_block atapi;
 };
 
+static struct sil24_cerr_info {
+	unsigned int err_mask, action;
+	const char *desc;
+} sil24_cerr_db[] = {
+	[0]			= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+				    "device error" },
+	[PORT_CERR_DEV]		= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+				    "device error via D2H FIS" },
+	[PORT_CERR_SDB]		= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+				    "device error via SDB FIS" },
+	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
+				    "error in data FIS" },
+	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
+				    "failed to transmit command FIS" },
+	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				     "protocol mismatch" },
+	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				    "data direction mismatch" },
+	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				    "ran out of SGEs while writing" },
+	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				    "ran out of SGEs while reading" },
+	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				    "invalid data direction for ATAPI CDB" },
+	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
+				     "SGT not on qword boundary" },
+	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI target abort while fetching SGT" },
+	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI master abort while fetching SGT" },
+	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI parity error while fetching SGT" },
+	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
+				     "PRB not on qword boundary" },
+	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI target abort while fetching PRB" },
+	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI master abort while fetching PRB" },
+	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI parity error while fetching PRB" },
+	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "undefined error while transferring data" },
+	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI target abort while transferring data" },
+	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI master abort while transferring data" },
+	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
+				    "PCI parity error while transferring data" },
+	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_SOFTRESET,
+				    "FIS received while sending service FIS" },
+};
+
 /*
  * ap->private_data
  *
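
The sil24_cerr_db[] table added above turns the controller's PORT_CMD_ERR code into an err_mask/EH-action pair plus a human-readable description, with a fallback for codes the table does not know. A minimal user-space sketch of that lookup pattern follows; the error codes and flag values are made-up stand-ins, not the real AC_ERR_*/ATA_EH_* constants.

#include <stdio.h>

enum {	/* stand-ins for the real AC_ERR_* / ATA_EH_* bits */
	ERR_DEV       = 1 << 0,
	ERR_HSM       = 1 << 1,
	ERR_OTHER     = 1 << 2,
	EH_REVALIDATE = 1 << 0,
	EH_SOFTRESET  = 1 << 1,
};

static const struct cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} cerr_db[] = {
	[1] = { ERR_DEV, EH_REVALIDATE, "device error via D2H FIS" },
	[4] = { ERR_HSM, EH_SOFTRESET,  "protocol mismatch" },
};

static void decode_cerr(unsigned int cerr, unsigned int *err_mask,
			unsigned int *action)
{
	const struct cerr_info *ci = NULL;

	if (cerr < sizeof(cerr_db) / sizeof(cerr_db[0]))
		ci = &cerr_db[cerr];

	if (ci && ci->desc) {			/* known error code */
		*err_mask |= ci->err_mask;
		*action |= ci->action;
		printf("cerr %u: %s\n", cerr, ci->desc);
	} else {				/* hole in the table or out of range */
		*err_mask |= ERR_OTHER;
		*action |= EH_SOFTRESET;
		printf("cerr %u: unknown command error\n", cerr);
	}
}

int main(void)
{
	unsigned int err_mask = 0, action = 0;

	decode_cerr(4, &err_mask, &action);
	decode_cerr(99, &err_mask, &action);
	printf("err_mask 0x%x action 0x%x\n", err_mask, action);
	return 0;
}
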
@@ -269,8 +324,11 @@
 static void sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static void sil24_irq_clear(struct ata_port *ap);
-static void sil24_eng_timeout(struct ata_port *ap);
 static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void sil24_freeze(struct ata_port *ap);
+static void sil24_thaw(struct ata_port *ap);
+static void sil24_error_handler(struct ata_port *ap);
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
 static int sil24_port_start(struct ata_port *ap);
 static void sil24_port_stop(struct ata_port *ap);
 static void sil24_host_stop(struct ata_host_set *host_set);
@@ -325,14 +383,17 @@
 	.qc_prep		= sil24_qc_prep,
 	.qc_issue		= sil24_qc_issue,
 
-	.eng_timeout		= sil24_eng_timeout,
-
 	.irq_handler		= sil24_interrupt,
 	.irq_clear		= sil24_irq_clear,
 
 	.scr_read		= sil24_scr_read,
 	.scr_write		= sil24_scr_write,
 
+	.freeze			= sil24_freeze,
+	.thaw			= sil24_thaw,
+	.error_handler		= sil24_error_handler,
+	.post_internal_cmd	= sil24_post_internal_cmd,
+
 	.port_start		= sil24_port_start,
 	.port_stop		= sil24_port_stop,
 	.host_stop		= sil24_host_stop,
@@ -459,21 +520,17 @@
 	struct sil24_port_priv *pp = ap->private_data;
 	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
 	dma_addr_t paddr = pp->cmd_block_dma;
-	u32 mask, irq_enable, irq_stat;
+	u32 mask, irq_stat;
 	const char *reason;
 
 	DPRINTK("ENTER\n");
 
-	if (!sata_dev_present(ap)) {
+	if (ata_port_offline(ap)) {
 		DPRINTK("PHY reports no device\n");
 		*class = ATA_DEV_NONE;
 		goto out;
 	}
 
-	/* temporarily turn off IRQs during SRST */
-	irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
-	writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
-
 	/* put the port into known state */
 	if (sil24_init_port(ap)) {
 		reason ="port not ready";
@@ -494,9 +551,6 @@
 	writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
 	irq_stat >>= PORT_IRQ_RAW_SHIFT;
 
-	/* restore IRQs */
-	writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
-
 	if (!(irq_stat & PORT_IRQ_COMPLETE)) {
 		if (irq_stat & PORT_IRQ_ERROR)
 			reason = "SRST command error";
@@ -516,7 +570,7 @@
 	return 0;
 
  err:
-	printk(KERN_ERR "ata%u: softreset failed (%s)\n", ap->id, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return -EIO;
 }
 
@@ -528,10 +582,10 @@
 	u32 tmp;
 
 	/* sil24 does the right thing(tm) without any protection */
-	ata_set_sata_spd(ap);
+	sata_set_spd(ap);
 
 	tout_msec = 100;
-	if (sata_dev_present(ap))
+	if (ata_port_online(ap))
 		tout_msec = 5000;
 
 	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
@@ -544,7 +598,7 @@
 	msleep(100);
 
 	if (tmp & PORT_CS_DEV_RST) {
-		if (!sata_dev_present(ap))
+		if (ata_port_offline(ap))
 			return 0;
 		reason = "link not ready";
 		goto err;
@@ -561,7 +615,7 @@
 	return 0;
 
  err:
-	printk(KERN_ERR "ata%u: hardreset failed (%s)\n", ap->id, reason);
+	ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
 	return -EIO;
 }
 
@@ -655,166 +709,134 @@
 	/* unused */
 }
 
-static int __sil24_restart_controller(void __iomem *port)
+static void sil24_freeze(struct ata_port *ap)
 {
-	u32 tmp;
-	int cnt;
-
-	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
-
-	/* Max ~10ms */
-	for (cnt = 0; cnt < 10000; cnt++) {
-		tmp = readl(port + PORT_CTRL_STAT);
-		if (tmp & PORT_CS_RDY)
-			return 0;
-		udelay(1);
-	}
-
-	return -1;
-}
-
-static void sil24_restart_controller(struct ata_port *ap)
-{
-	if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr))
-		printk(KERN_ERR DRV_NAME
-		       " ata%u: failed to restart controller\n", ap->id);
-}
-
-static int __sil24_reset_controller(void __iomem *port)
-{
-	int cnt;
-	u32 tmp;
-
-	/* Reset controller state.  Is this correct? */
-	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
-	readl(port + PORT_CTRL_STAT);	/* sync */
-
-	/* Max ~100ms */
-	for (cnt = 0; cnt < 1000; cnt++) {
-		udelay(100);
-		tmp = readl(port + PORT_CTRL_STAT);
-		if (!(tmp & PORT_CS_DEV_RST))
-			break;
-	}
-
-	if (tmp & PORT_CS_DEV_RST)
-		return -1;
-
-	if (tmp & PORT_CS_RDY)
-		return 0;
-
-	return __sil24_restart_controller(port);
-}
-
-static void sil24_reset_controller(struct ata_port *ap)
-{
-	printk(KERN_NOTICE DRV_NAME
-	       " ata%u: resetting controller...\n", ap->id);
-	if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr))
-                printk(KERN_ERR DRV_NAME
-                       " ata%u: failed to reset controller\n", ap->id);
-}
-
-static void sil24_eng_timeout(struct ata_port *ap)
-{
-	struct ata_queued_cmd *qc;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-
-	printk(KERN_ERR "ata%u: command timeout\n", ap->id);
-	qc->err_mask |= AC_ERR_TIMEOUT;
-	ata_eh_qc_complete(qc);
-
-	sil24_reset_controller(ap);
-}
-
-static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
-{
-	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
-	struct sil24_port_priv *pp = ap->private_data;
 	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
-	u32 irq_stat, cmd_err, sstatus, serror;
-	unsigned int err_mask;
 
-	irq_stat = readl(port + PORT_IRQ_STAT);
-	writel(irq_stat, port + PORT_IRQ_STAT);		/* clear irq */
-
-	if (!(irq_stat & PORT_IRQ_ERROR)) {
-		/* ignore non-completion, non-error irqs for now */
-		printk(KERN_WARNING DRV_NAME
-		       "ata%u: non-error exception irq (irq_stat %x)\n",
-		       ap->id, irq_stat);
-		return;
-	}
-
-	cmd_err = readl(port + PORT_CMD_ERR);
-	sstatus = readl(port + PORT_SSTATUS);
-	serror = readl(port + PORT_SERROR);
-	if (serror)
-		writel(serror, port + PORT_SERROR);
-
-	/*
-	 * Don't log ATAPI device errors.  They're supposed to happen
-	 * and any serious errors will be logged using sense data by
-	 * the SCSI layer.
+	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, so clear
+	 * PORT_IRQ_ENABLE instead.
 	 */
-	if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB)
-		printk("ata%u: error interrupt on port%d\n"
-		       "  stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
-		       ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
+	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
+}
 
-	if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
-		/*
-		 * Device is reporting error, tf registers are valid.
+static void sil24_thaw(struct ata_port *ap)
+{
+	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port + PORT_IRQ_STAT);
+	writel(tmp, port + PORT_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
+}
+
+static void sil24_error_intr(struct ata_port *ap)
+{
+	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	int freeze = 0;
+	u32 irq_stat;
+
+	/* on error, we need to clear IRQ explicitly */
+	irq_stat = readl(port + PORT_IRQ_STAT);
+	writel(irq_stat, port + PORT_IRQ_STAT);
+
+	/* first, analyze and record host port events */
+	ata_ehi_clear_desc(ehi);
+
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_DEV_XCHG) {
+		ehi->err_mask |= AC_ERR_ATA_BUS;
+		/* sil24 doesn't recover very well from phy
+		 * disconnection with a softreset.  Force hardreset.
 		 */
-		sil24_update_tf(ap);
-		err_mask = ac_err_mask(pp->tf.command);
-		sil24_restart_controller(ap);
-	} else {
-		/*
-		 * Other errors.  libata currently doesn't have any
-		 * mechanism to report these errors.  Just turn on
-		 * ATA_ERR.
-		 */
-		err_mask = AC_ERR_OTHER;
-		sil24_reset_controller(ap);
+		ehi->action |= ATA_EH_HARDRESET;
+		ata_ehi_push_desc(ehi, ", device_exchanged");
+		freeze = 1;
 	}
 
-	if (qc) {
-		qc->err_mask |= err_mask;
-		ata_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS");
+		freeze = 1;
 	}
+
+	/* deal with command error */
+	if (irq_stat & PORT_IRQ_ERROR) {
+		struct sil24_cerr_info *ci = NULL;
+		unsigned int err_mask = 0, action = 0;
+		struct ata_queued_cmd *qc;
+		u32 cerr;
+
+		/* analyze CMD_ERR */
+		cerr = readl(port + PORT_CMD_ERR);
+		if (cerr < ARRAY_SIZE(sil24_cerr_db))
+			ci = &sil24_cerr_db[cerr];
+
+		if (ci && ci->desc) {
+			err_mask |= ci->err_mask;
+			action |= ci->action;
+			ata_ehi_push_desc(ehi, ", %s", ci->desc);
+		} else {
+			err_mask |= AC_ERR_OTHER;
+			action |= ATA_EH_SOFTRESET;
+			ata_ehi_push_desc(ehi, ", unknown command error %d",
+					  cerr);
+		}
+
+		/* record error info */
+		qc = ata_qc_from_tag(ap, ap->active_tag);
+		if (qc) {
+			int tag = qc->tag;
+			if (unlikely(ata_tag_internal(tag)))
+				tag = 0;
+			sil24_update_tf(ap);
+			qc->err_mask |= err_mask;
+		} else
+			ehi->err_mask |= err_mask;
+
+		ehi->action |= action;
+	}
+
+	/* freeze or abort */
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
 static inline void sil24_host_intr(struct ata_port *ap)
 {
-	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	struct ata_queued_cmd *qc;
 	u32 slot_stat;
 
 	slot_stat = readl(port + PORT_SLOT_STAT);
-	if (!(slot_stat & HOST_SSTAT_ATTN)) {
-		struct sil24_port_priv *pp = ap->private_data;
 
-		if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-			writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
+		sil24_error_intr(ap);
+		return;
+	}
 
-		/*
-		 * !HOST_SSAT_ATTN guarantees successful completion,
-		 * so reading back tf registers is unnecessary for
-		 * most commands.  TODO: read tf registers for
-		 * commands which require these values on successful
-		 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
-		 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
-		 */
-		sil24_update_tf(ap);
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
 
-		if (qc) {
-			qc->err_mask |= ac_err_mask(pp->tf.command);
-			ata_qc_complete(qc);
-		}
-	} else
-		sil24_error_intr(ap, slot_stat);
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc) {
+		if (qc->flags & ATA_QCFLAG_RESULT_TF)
+			sil24_update_tf(ap);
+		ata_qc_complete(qc);
+		return;
+	}
+
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+			"(slot_stat 0x%x active_tag %d)\n",
+			slot_stat, ap->active_tag);
 }
 
 static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
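
sil24_error_intr() above follows the new-style EH flow: read and acknowledge the raw interrupt status, record what happened in ap->eh_info (err_mask, suggested recovery action, free-form description), then either ata_port_freeze() for conditions that require the port's IRQs to be shut off or ata_port_abort() to simply fail the in-flight commands. The sketch below models only that decision structure; the bit names and helpers are illustrative stand-ins, not libata's.

#include <stdio.h>

enum {	/* stand-ins for the real per-port IRQ status bits */
	IRQ_DEV_XCHG = 1 << 0,	/* device exchanged on the phy */
	IRQ_UNK_FIS  = 1 << 1,	/* unknown FIS received */
	IRQ_CMD_ERR  = 1 << 2,	/* command failed */
};

enum {	/* stand-ins for err_mask / EH action bits */
	ERR_DEV      = 1 << 0,
	ERR_HSM      = 1 << 1,
	ERR_ATA_BUS  = 1 << 2,
	EH_SOFTRESET = 1 << 0,
	EH_HARDRESET = 1 << 1,
};

struct eh_info { unsigned int err_mask, action; };

static void port_freeze(void) { printf("-> freeze port (IRQs masked, EH scheduled)\n"); }
static void port_abort(void)  { printf("-> abort in-flight commands, schedule EH\n"); }

static void error_intr(unsigned int irq_stat, struct eh_info *ehi)
{
	int freeze = 0;

	if (irq_stat & IRQ_DEV_XCHG) {		/* phy event: force hardreset */
		ehi->err_mask |= ERR_ATA_BUS;
		ehi->action |= EH_HARDRESET;
		freeze = 1;
	}
	if (irq_stat & IRQ_UNK_FIS) {		/* HSM violation: silence the port */
		ehi->err_mask |= ERR_HSM;
		ehi->action |= EH_SOFTRESET;
		freeze = 1;
	}
	if (irq_stat & IRQ_CMD_ERR)		/* plain command failure */
		ehi->err_mask |= ERR_DEV;

	if (freeze)
		port_freeze();
	else
		port_abort();
}

int main(void)
{
	struct eh_info ehi = { 0, 0 };

	error_intr(IRQ_CMD_ERR, &ehi);			/* abort path */
	error_intr(IRQ_CMD_ERR | IRQ_UNK_FIS, &ehi);	/* freeze path */
	printf("err_mask 0x%x action 0x%x\n", ehi.err_mask, ehi.action);
	return 0;
}
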
@@ -854,6 +876,31 @@
 	return IRQ_RETVAL(handled);
 }
 
+static void sil24_error_handler(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->eh_context;
+
+	if (sil24_init_port(ap)) {
+		ata_eh_freeze_port(ap);
+		ehc->i.action |= ATA_EH_HARDRESET;
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, sil24_softreset, sil24_hardreset, ata_std_postreset);
+}
+
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->err_mask)
+		sil24_init_port(ap);
+}
+
 static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
 {
 	const size_t cb_size = sizeof(*pp->cmd_block);
@@ -1066,15 +1113,6 @@
 		/* Always use 64bit activation */
 		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
 
-		/* Configure interrupts */
-		writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
-		writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
-		       PORT_IRQ_SDB_NOTIFY, port + PORT_IRQ_ENABLE_SET);
-
-		/* Clear interrupts */
-		writel(0x0fff0fff, port + PORT_IRQ_STAT);
-		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
-
 		/* Clear port multiplier enable and resume bits */
 		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
 	}
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 4c07ba1..70a6954 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -868,15 +868,16 @@
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
 	case ATA_PROT_NODATA:
-		printk(KERN_ERR "ata%u: command timeout\n", ap->id);
+		ata_port_printk(ap, KERN_ERR, "command timeout\n");
 		qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
 		break;
 
 	default:
 		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
-		printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
-		       ap->id, qc->tf.command, drv_stat);
+		ata_port_printk(ap, KERN_ERR,
+				"unknown timeout, cmd 0x%x stat 0x%x\n",
+				qc->tf.command, drv_stat);
 
 		qc->err_mask |= ac_err_mask(drv_stat);
 		break;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 73994e2..dae4f08 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -720,6 +720,24 @@
 static DEFINE_PER_CPU(struct list_head, scsi_done_q);
 
 /**
+ * scsi_req_abort_cmd - Request command recovery for the specified command
+ * @cmd: pointer to the SCSI command of interest
+ *
+ * This function requests that SCSI Core start recovery for the
+ * command by deleting the timer and adding the command to the eh
+ * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs that
+ * implement their own error recovery MAY ignore the timeout event if
+ * they issued scsi_req_abort_cmd.
+ */
+void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
+{
+	if (!scsi_delete_timer(cmd))
+		return;
+	scsi_times_out(cmd);
+}
+EXPORT_SYMBOL(scsi_req_abort_cmd);
+
+/**
  * scsi_done - Enqueue the finished SCSI command into the done queue.
  * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
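
scsi_req_abort_cmd() leans on scsi_delete_timer()'s return value so it cannot race with a timeout that has already fired: only the caller that actually cancels the pending timer goes on to run the timeout path. A stand-alone model of that "whoever cancels the timer owns the escalation" idiom, with a boolean flag standing in for the real timer:

#include <stdio.h>
#include <stdbool.h>

struct cmd { bool timer_pending; };

/* returns true only if the timer was still pending, i.e. we cancelled it */
static bool delete_timer(struct cmd *cmd)
{
	bool was_pending = cmd->timer_pending;

	cmd->timer_pending = false;
	return was_pending;
}

static void times_out(struct cmd *cmd)
{
	(void)cmd;
	printf("command handed to error handling\n");
}

/* mirrors the scsi_req_abort_cmd() shape: bail out if the timeout already ran */
static void req_abort_cmd(struct cmd *cmd)
{
	if (!delete_timer(cmd))
		return;
	times_out(cmd);
}

int main(void)
{
	struct cmd c = { .timer_pending = true };

	req_abort_cmd(&c);	/* first caller wins and escalates */
	req_abort_cmd(&c);	/* second caller sees no timer and does nothing */
	return 0;
}
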
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c75646..9ca71cb 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -56,6 +56,7 @@
 				printk("Waking error handler thread\n"));
 	}
 }
+EXPORT_SYMBOL_GPL(scsi_eh_wakeup);
 
 /**
  * scsi_eh_scmd_add - add scsi cmd to error handling.
@@ -1517,7 +1518,7 @@
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
-		if (shost->host_failed == 0 ||
+		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
 		    shost->host_failed != shost->host_busy) {
 			SCSI_LOG_ERROR_RECOVERY(1,
 				printk("Error handler scsi_eh_%d sleeping\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7b0f9a3..c55d195 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -566,7 +566,7 @@
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
 	if (unlikely(scsi_host_in_recovery(shost) &&
-		     shost->host_failed))
+		     (shost->host_failed || shost->host_eh_scheduled)))
 		scsi_eh_wakeup(shost);
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
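
With host_eh_scheduled, both the error-handler thread and the completion path above now consider EH runnable either when failed commands have drained the host (host_failed == host_busy) or when EH was scheduled with no failed command at all. The predicate below restates that wake-up condition as plain C so the two clauses are easy to test:

#include <stdio.h>

/* EH runs only when something is queued for it (failed commands or an
 * explicitly scheduled EH) and nothing is still in flight that EH would
 * trample on, i.e. every busy command has already failed.
 */
static int eh_should_run(unsigned int host_busy, unsigned int host_failed,
			 unsigned int host_eh_scheduled)
{
	if (host_failed == 0 && host_eh_scheduled == 0)
		return 0;			/* nothing to recover */
	if (host_failed != host_busy)
		return 0;			/* commands still in flight */
	return 1;
}

int main(void)
{
	printf("%d\n", eh_should_run(2, 2, 0));	/* classic: all busy cmds failed */
	printf("%d\n", eh_should_run(0, 0, 1));	/* new: EH scheduled, no failed cmd */
	printf("%d\n", eh_should_run(3, 1, 0));	/* still waiting for two commands */
	return 0;
}
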
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 27c4827..0b39081 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -63,7 +63,6 @@
 extern void scsi_times_out(struct scsi_cmnd *cmd);
 extern int scsi_error_handler(void *host);
 extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
 extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
 
 /* scsi_lib.c */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 206d859..1cbeb43 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -97,6 +97,9 @@
 	ATA_DRQ			= (1 << 3),	/* data request i/o */
 	ATA_ERR			= (1 << 0),	/* have an error */
 	ATA_SRST		= (1 << 2),	/* software reset */
+	ATA_ICRC		= (1 << 7),	/* interface CRC error */
+	ATA_UNC			= (1 << 6),	/* uncorrectable media error */
+	ATA_IDNF		= (1 << 4),	/* ID not found */
 	ATA_ABORTED		= (1 << 2),	/* command aborted */
 
 	/* ATA command block registers */
@@ -192,6 +195,16 @@
 	SCR_ACTIVE		= 3,
 	SCR_NOTIFICATION	= 4,
 
+	/* SError bits */
+	SERR_DATA_RECOVERED	= (1 << 0), /* recovered data error */
+	SERR_COMM_RECOVERED	= (1 << 1), /* recovered comm failure */
+	SERR_DATA		= (1 << 8), /* unrecovered data error */
+	SERR_PERSISTENT		= (1 << 9), /* persistent data/comm error */
+	SERR_PROTOCOL		= (1 << 10), /* protocol violation */
+	SERR_INTERNAL		= (1 << 11), /* host internal error */
+	SERR_PHYRDY_CHG		= (1 << 16), /* PHY RDY changed */
+	SERR_DEV_XCHG		= (1 << 26), /* device exchanged */
+
 	/* struct ata_taskfile flags */
 	ATA_TFLAG_LBA48		= (1 << 0), /* enable 48-bit LBA and "HOB" */
 	ATA_TFLAG_ISADDR	= (1 << 1), /* enable r/w to nsect/lba regs */
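
The SERR_* constants added above give error handlers named bits for the SATA SError register, which the new EH code records in ata_eh_info.serror. A small decoder over just the bits defined in this hunk; the table layout and output format are illustrative, not libata's reporting code.

#include <stdio.h>

/* values copied from the SERR_* additions above */
#define SERR_DATA_RECOVERED	(1u << 0)
#define SERR_COMM_RECOVERED	(1u << 1)
#define SERR_DATA		(1u << 8)
#define SERR_PERSISTENT		(1u << 9)
#define SERR_PROTOCOL		(1u << 10)
#define SERR_INTERNAL		(1u << 11)
#define SERR_PHYRDY_CHG		(1u << 16)
#define SERR_DEV_XCHG		(1u << 26)

static void decode_serror(unsigned int serror)
{
	static const struct { unsigned int bit; const char *name; } tbl[] = {
		{ SERR_DATA_RECOVERED,	"recovered data error" },
		{ SERR_COMM_RECOVERED,	"recovered comm failure" },
		{ SERR_DATA,		"unrecovered data error" },
		{ SERR_PERSISTENT,	"persistent data/comm error" },
		{ SERR_PROTOCOL,	"protocol violation" },
		{ SERR_INTERNAL,	"host internal error" },
		{ SERR_PHYRDY_CHG,	"PHY RDY changed" },
		{ SERR_DEV_XCHG,	"device exchanged" },
	};
	unsigned int i;

	printf("SError 0x%08x:", serror);
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (serror & tbl[i].bit)
			printf(" [%s]", tbl[i].name);
	printf("\n");
}

int main(void)
{
	decode_serror(SERR_PHYRDY_CHG | SERR_DEV_XCHG);
	return 0;
}
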
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bf86ee4..db17723 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -108,7 +108,9 @@
 	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
 	ATA_MAX_PORTS		= 8,
 	ATA_DEF_QUEUE		= 1,
-	ATA_MAX_QUEUE		= 1,
+	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
+	ATA_MAX_QUEUE		= 2,
+	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
 	ATA_MAX_SECTORS		= 200,	/* FIXME */
 	ATA_MAX_BUS		= 2,
 	ATA_DEF_BUSY_WAIT	= 10000,
@@ -149,11 +151,15 @@
 	ATA_FLAG_PIO_POLLING	= (1 << 10), /* use polling PIO if LLD
 					      * doesn't handle PIO interrupts */
 
-	ATA_FLAG_DEBUGMSG	= (1 << 17),
-	ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
+	ATA_FLAG_DEBUGMSG	= (1 << 14),
+	ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */
 
-	ATA_FLAG_DISABLED	= (1 << 19), /* port is disabled, ignore it */
-	ATA_FLAG_SUSPENDED	= (1 << 20), /* port is suspended */
+	ATA_FLAG_EH_PENDING	= (1 << 16), /* EH pending */
+	ATA_FLAG_FROZEN		= (1 << 17), /* port is frozen */
+	ATA_FLAG_RECOVERED	= (1 << 18), /* recovery action performed */
+
+	ATA_FLAG_DISABLED	= (1 << 22), /* port is disabled, ignore it */
+	ATA_FLAG_SUSPENDED	= (1 << 23), /* port is suspended (power) */
 
 	/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
 
@@ -163,7 +169,11 @@
 	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
-	ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */
+	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
+
+	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
+	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
+	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
 
 	/* host set flags */
 	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
@@ -214,8 +224,29 @@
 	ATA_PORT_PRIMARY	= (1 << 0),
 	ATA_PORT_SECONDARY	= (1 << 1),
 
+	/* ering size */
+	ATA_ERING_SIZE		= 32,
+
+	/* desc_len for ata_eh_info and context */
+	ATA_EH_DESC_LEN		= 80,
+
+	/* reset / recovery action types */
+	ATA_EH_REVALIDATE	= (1 << 0),
+	ATA_EH_SOFTRESET	= (1 << 1),
+	ATA_EH_HARDRESET	= (1 << 2),
+
+	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
+
+	/* ata_eh_info->flags */
+	ATA_EHI_DID_RESET	= (1 << 0), /* already reset this port */
+
+	/* max repeat if error condition is still set after ->error_handler */
+	ATA_EH_MAX_REPEAT	= 5,
+
 	/* how hard are we gonna try to probe/recover devices */
 	ATA_PROBE_MAX_TRIES	= 3,
+	ATA_EH_RESET_TRIES	= 3,
+	ATA_EH_DEV_TRIES	= 3,
 };
 
 enum hsm_task_states {
@@ -340,7 +371,7 @@
 	struct scatterlist	*__sg;
 
 	unsigned int		err_mask;
-
+	struct ata_taskfile	result_tf;
 	ata_qc_cb_t		complete_fn;
 
 	void			*private_data;
@@ -352,12 +383,24 @@
 	unsigned long		rw_reqbuf;
 };
 
+struct ata_ering_entry {
+	int			is_io;
+	unsigned int		err_mask;
+	u64			timestamp;
+};
+
+struct ata_ering {
+	int			cursor;
+	struct ata_ering_entry	ring[ATA_ERING_SIZE];
+};
+
 struct ata_device {
+	struct ata_port		*ap;
 	u64			n_sectors;	/* size of device, if ATA */
 	unsigned long		flags;		/* ATA_DFLAG_xxx */
 	unsigned int		class;		/* ATA_DEV_xxx */
 	unsigned int		devno;		/* 0 or 1 */
-	u16			*id;		/* IDENTIFY xxx DEVICE data */
+	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
 	u8			pio_mode;
 	u8			dma_mode;
 	u8			xfer_mode;
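
struct ata_ering is a fixed-size ring of recent error records: a cursor that wraps at ATA_ERING_SIZE and slots holding is_io/err_mask/timestamp, so EH can look back over a device's recent failures. A stand-alone sketch of the record-and-walk pattern under those definitions; the helper names and the exact cursor handling are illustrative rather than copied from libata.

#include <stdio.h>

#define ERING_SIZE 32

struct ering_entry {
	int		is_io;
	unsigned int	err_mask;
	unsigned long	timestamp;
};

struct ering {
	int			cursor;
	struct ering_entry	ring[ERING_SIZE];
};

/* advance the cursor and record one error event */
static void ering_record(struct ering *e, int is_io, unsigned int err_mask,
			 unsigned long now)
{
	e->cursor = (e->cursor + 1) % ERING_SIZE;
	e->ring[e->cursor].is_io = is_io;
	e->ring[e->cursor].err_mask = err_mask;
	e->ring[e->cursor].timestamp = now;
}

/* walk from the most recent entry back to the oldest recorded one */
static void ering_dump(const struct ering *e)
{
	int i, idx;

	for (i = 0; i < ERING_SIZE; i++) {
		idx = (e->cursor - i + ERING_SIZE) % ERING_SIZE;
		if (!e->ring[idx].err_mask)
			break;		/* unused slot: end of history */
		printf("t=%lu io=%d err_mask=0x%x\n",
		       e->ring[idx].timestamp, e->ring[idx].is_io,
		       e->ring[idx].err_mask);
	}
}

int main(void)
{
	struct ering e = { 0 };

	ering_record(&e, 1, 0x4, 100);	/* e.g. a bus error on an I/O command */
	ering_record(&e, 1, 0x1, 105);
	ering_dump(&e);
	return 0;
}
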
@@ -377,6 +420,24 @@
 	u16			cylinders;	/* Number of cylinders */
 	u16			heads;		/* Number of heads */
 	u16			sectors;	/* Number of sectors per track */
+
+	/* error history */
+	struct ata_ering	ering;
+};
+
+struct ata_eh_info {
+	struct ata_device	*dev;		/* offending device */
+	u32			serror;		/* SError from LLDD */
+	unsigned int		err_mask;	/* port-wide err_mask */
+	unsigned int		action;		/* ATA_EH_* action mask */
+	unsigned int		flags;		/* ATA_EHI_* flags */
+	char			desc[ATA_EH_DESC_LEN];
+	int			desc_len;
+};
+
+struct ata_eh_context {
+	struct ata_eh_info	i;
+	int			tries[ATA_MAX_DEVICES];
 };
 
 struct ata_port {
@@ -403,6 +464,11 @@
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
 	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
 
+	/* record runtime error info, protected by host_set lock */
+	struct ata_eh_info	eh_info;
+	/* EH context owned by EH */
+	struct ata_eh_context	eh_context;
+
 	struct ata_device	device[ATA_MAX_DEVICES];
 
 	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
@@ -421,6 +487,8 @@
 	struct list_head	eh_done_q;
 
 	void			*private_data;
+
+	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
 };
 
 struct ata_port_operations {
@@ -454,7 +522,15 @@
 	void (*qc_prep) (struct ata_queued_cmd *qc);
 	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
 
-	void (*eng_timeout) (struct ata_port *ap);
+	/* Error handlers.  ->error_handler overrides ->eng_timeout and
+	 * indicates that new-style EH is in place.
+	 */
+	void (*eng_timeout) (struct ata_port *ap); /* obsolete */
+
+	void (*freeze) (struct ata_port *ap);
+	void (*thaw) (struct ata_port *ap);
+	void (*error_handler) (struct ata_port *ap);
+	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
 
 	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
 	void (*irq_clear) (struct ata_port *);
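
The comment above states the contract for the new hooks: a driver that fills ->error_handler (optionally together with ->freeze, ->thaw and ->post_internal_cmd) opts into new-style EH, and its ->eng_timeout is then ignored. The sketch below models only that "new hook overrides old hook" dispatch with invented types; it does not reproduce libata's actual freeze/thaw sequencing.

#include <stdio.h>

struct port;

struct port_operations {
	void (*eng_timeout)(struct port *ap);	/* old-style, obsolete */
	void (*freeze)(struct port *ap);
	void (*thaw)(struct port *ap);
	void (*error_handler)(struct port *ap);	/* presence => new-style EH */
};

struct port { const struct port_operations *ops; };

/* core-side dispatch: ->error_handler takes precedence over ->eng_timeout */
static void handle_port_error(struct port *ap)
{
	if (ap->ops->error_handler)
		ap->ops->error_handler(ap);	/* new-style recovery path */
	else if (ap->ops->eng_timeout)
		ap->ops->eng_timeout(ap);	/* legacy timeout path */
}

static void demo_eh(struct port *ap)
{
	(void)ap;
	printf("error_handler invoked\n");
}

static const struct port_operations demo_ops = {
	.error_handler	= demo_eh,		/* freeze/thaw left unset here */
};

int main(void)
{
	struct port ap = { .ops = &demo_ops };

	handle_port_error(&ap);
	return 0;
}
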
@@ -500,7 +576,7 @@
 extern void __sata_phy_reset(struct ata_port *ap);
 extern void sata_phy_reset(struct ata_port *ap);
 extern void ata_bus_reset(struct ata_port *ap);
-extern int ata_set_sata_spd(struct ata_port *ap);
+extern int sata_set_spd(struct ata_port *ap);
 extern int ata_drive_probe_reset(struct ata_port *ap,
 			ata_probeinit_fn_t probeinit,
 			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -509,8 +585,7 @@
 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
-extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
-			      int post_reset);
+extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
 extern void ata_port_disable(struct ata_port *);
 extern void ata_std_ports(struct ata_ioports *ioaddr);
 #ifdef CONFIG_PCI
@@ -526,14 +601,18 @@
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
 extern int ata_scsi_release(struct Scsi_Host *host);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+extern int sata_scr_valid(struct ata_port *ap);
+extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
+extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
+extern int ata_port_online(struct ata_port *ap);
+extern int ata_port_offline(struct ata_port *ap);
 extern int ata_scsi_device_resume(struct scsi_device *);
 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
-extern int ata_device_resume(struct ata_port *, struct ata_device *);
-extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state);
+extern int ata_device_resume(struct ata_device *);
+extern int ata_device_suspend(struct ata_device *, pm_message_t state);
 extern int ata_ratelimit(void);
 extern unsigned int ata_busy_sleep(struct ata_port *ap,
 				   unsigned long timeout_pat,
@@ -578,16 +657,22 @@
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
 extern u8   ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
-extern void __ata_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
-			      struct scsi_cmnd *cmd,
+extern void ata_bmdma_freeze(struct ata_port *ap);
+extern void ata_bmdma_thaw(struct ata_port *ap);
+extern void ata_bmdma_drive_eh(struct ata_port *ap,
+			       ata_reset_fn_t softreset,
+			       ata_reset_fn_t hardreset,
+			       ata_postreset_fn_t postreset);
+extern void ata_bmdma_error_handler(struct ata_port *ap);
+extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
+extern void ata_qc_complete(struct ata_queued_cmd *qc);
+extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *));
 extern int ata_std_bios_param(struct scsi_device *sdev,
 			      struct block_device *bdev,
 			      sector_t capacity, int geom[]);
 extern int ata_scsi_slave_config(struct scsi_device *sdev);
-extern struct ata_device *ata_dev_pair(struct ata_port *ap, 
-				       struct ata_device *adev);
+extern struct ata_device *ata_dev_pair(struct ata_device *adev);
 
 /*
  * Timing helpers
@@ -637,10 +722,46 @@
  * EH
  */
 extern void ata_eng_timeout(struct ata_port *ap);
+
+extern void ata_port_schedule_eh(struct ata_port *ap);
+extern int ata_port_abort(struct ata_port *ap);
+extern int ata_port_freeze(struct ata_port *ap);
+
+extern void ata_eh_freeze_port(struct ata_port *ap);
+extern void ata_eh_thaw_port(struct ata_port *ap);
+
 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
 
+extern void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
+		      ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
 
+/*
+ * printk helpers
+ */
+#define ata_port_printk(ap, lv, fmt, args...) \
+	printk(lv"ata%u: "fmt, (ap)->id , ##args)
+
+#define ata_dev_printk(dev, lv, fmt, args...) \
+	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
+
+/*
+ * ata_eh_info helpers
+ */
+#define ata_ehi_push_desc(ehi, fmt, args...) do { \
+	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
+				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
+				     fmt , ##args); \
+} while (0)
+
+#define ata_ehi_clear_desc(ehi) do { \
+	(ehi)->desc[0] = '\0'; \
+	(ehi)->desc_len = 0; \
+} while (0)
+
+/*
+ * qc helpers
+ */
 static inline int
 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
@@ -683,6 +804,11 @@
 	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
 }
 
+static inline unsigned int ata_tag_internal(unsigned int tag)
+{
+	return tag == ATA_MAX_QUEUE - 1;
+}
+
 static inline unsigned int ata_class_enabled(unsigned int class)
 {
 	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
@@ -791,20 +917,35 @@
 	qc->tf.ctl |= ATA_NIEN;
 }
 
-static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
-						      unsigned int tag)
+static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
+						       unsigned int tag)
 {
 	if (likely(ata_tag_valid(tag)))
 		return &ap->qcmd[tag];
 	return NULL;
 }
 
-static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
+static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
+						     unsigned int tag)
+{
+	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+	if (unlikely(!qc) || !ap->ops->error_handler)
+		return qc;
+
+	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
+			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+		return qc;
+
+	return NULL;
+}
+
+static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
 {
 	memset(tf, 0, sizeof(*tf));
 
-	tf->ctl = ap->ctl;
-	if (device == 0)
+	tf->ctl = dev->ap->ctl;
+	if (dev->devno == 0)
 		tf->device = ATA_DEVICE_OBS;
 	else
 		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
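
The reworked ata_qc_from_tag() above hides commands that EH has claimed: on a port with ->error_handler, a qc is visible to the normal completion path only while it is ACTIVE and not FAILED, so an interrupt handler can no longer complete a command EH already owns. A small model of that flag gate; the flag values and names are stand-ins.

#include <stdio.h>
#include <stddef.h>

enum {
	QCFLAG_ACTIVE = 1 << 0,
	QCFLAG_FAILED = 1 << 1,		/* command owned by EH */
};

struct qc { unsigned int flags; };

/* return the qc only if the normal completion path may still touch it */
static struct qc *qc_for_irq_path(struct qc *qc, int new_style_eh)
{
	if (!qc || !new_style_eh)
		return qc;		/* old-style ports: no gating */

	if ((qc->flags & (QCFLAG_ACTIVE | QCFLAG_FAILED)) == QCFLAG_ACTIVE)
		return qc;

	return NULL;			/* idle or already claimed by EH */
}

int main(void)
{
	struct qc active = { QCFLAG_ACTIVE };
	struct qc failed = { QCFLAG_ACTIVE | QCFLAG_FAILED };

	printf("%p\n", (void *)qc_for_irq_path(&active, 1));	/* visible */
	printf("%p\n", (void *)qc_for_irq_path(&failed, 1));	/* hidden: NULL */
	return 0;
}
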
@@ -819,26 +960,11 @@
 	qc->nbytes = qc->curbytes = 0;
 	qc->err_mask = 0;
 
-	ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
-}
+	ata_tf_init(qc->dev, &qc->tf);
 
-/**
- *	ata_qc_complete - Complete an active ATA command
- *	@qc: Command to complete
- *	@err_mask: ATA Status register contents
- *
- *	Indicate to the mid and upper layers that an ATA
- *	command has completed, with either an ok or not-ok status.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-static inline void ata_qc_complete(struct ata_queued_cmd *qc)
-{
-	if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
-		return;
-
-	__ata_qc_complete(qc);
+	/* init result_tf such that it indicates normal completion */
+	qc->result_tf.command = ATA_DRDY;
+	qc->result_tf.feature = 0;
 }
 
 /**
@@ -917,28 +1043,6 @@
 	return status;
 }
 
-static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
-{
-	return ap->ops->scr_read(ap, reg);
-}
-
-static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
-{
-	ap->ops->scr_write(ap, reg, val);
-}
-
-static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
-				   u32 val)
-{
-	ap->ops->scr_write(ap, reg, val);
-	(void) ap->ops->scr_read(ap, reg);
-}
-
-static inline unsigned int sata_dev_present(struct ata_port *ap)
-{
-	return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
-}
-
 static inline int ata_try_flush_cache(const struct ata_device *dev)
 {
 	return ata_id_wcache_enabled(dev->id) ||
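
The inline scr_read()/scr_write() helpers removed above returned raw register values and assumed SCR access always works; the sata_scr_*() replacements declared earlier in this header instead return a status and hand the value back through a pointer, which is why callers such as the sata_mv hunks in this merge now check the result. A user-space sketch of that calling convention, with a fake register file and an illustrative errno value standing in for whatever the real helpers return on unsupported ports.

#include <stdio.h>
#include <errno.h>

#define SCR_STATUS 0
#define SCR_ERROR  1

struct port {
	int		scr_valid;	/* does this port expose SCRs at all? */
	unsigned int	scr[3];		/* fake SStatus/SError/SControl */
};

/* 0 on success with *val filled in, negative errno if SCRs are unavailable */
static int scr_read(struct port *ap, int reg, unsigned int *val)
{
	if (!ap->scr_valid)
		return -EOPNOTSUPP;
	*val = ap->scr[reg];
	return 0;
}

int main(void)
{
	struct port ap = { .scr_valid = 1, .scr = { 0x113, 0, 0 } };
	unsigned int sstatus;

	if (scr_read(&ap, SCR_STATUS, &sstatus) == 0 && (sstatus & 0xf) == 0x3)
		printf("link online, sstatus 0x%x\n", sstatus);
	else
		printf("link offline or SCRs not available\n");
	return 0;
}
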
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 1ace1b9..88c6c4da 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -151,5 +151,6 @@
 extern void scsi_put_command(struct scsi_cmnd *);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
+extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
 
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d160880..212c983 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,7 @@
 }
 
 
+extern void scsi_eh_wakeup(struct Scsi_Host *shost);
 extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
 			       struct list_head *done_q);
 extern void scsi_eh_flush_done_q(struct list_head *done_q);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index de6ce54..a42efd6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -472,6 +472,7 @@
 	 */
 	unsigned int host_busy;		   /* commands actually active on low-level */
 	unsigned int host_failed;	   /* commands that failed. */
+	unsigned int host_eh_scheduled;    /* EH scheduled without command */
     
 	unsigned short host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
 	int resetting; /* if set, it means that last_reset is a valid value */