Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: convert DRM_ERROR to DRM_DEBUG in phys object pwrite path
  drm/i915: make hw page ioremap use ioremap_wc
  drm: edid revision 0 is valid
  drm: Correct unbalanced drm_vblank_put() during mode setting.
  drm: disable encoders before re-routing them
  drm: Fix ordering of bit fields in EDID structure leading to huge vsync values.
  drm: Fix shifts of EDID vsync offset/width fields.
  drm/i915: handle bogus VBT panel timing
  drm/i915: remove PLL debugging messages
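
The two EDID commits in the list above touch the same portability trap: C bit-field
layout is implementation-defined, so parsing hardware-defined descriptor bytes
through bit fields can misread packed fields such as the vsync offset and width.
A minimal sketch of the safer decoding, using explicit masks and shifts on the raw
byte; the byte index and field names below are illustrative, not the drm structures:

#include <stdint.h>
#include <stdio.h>

/*
 * In an EDID detailed timing descriptor, one byte packs the vertical
 * sync offset (upper nibble) and vertical sync pulse width (lower
 * nibble).  Explicit shifts avoid depending on compiler-specific
 * bit-field ordering.
 */
static void decode_vsync(uint8_t raw, unsigned *offset, unsigned *width)
{
	*offset = (raw >> 4) & 0xf;	/* upper 4 bits */
	*width  = raw & 0xf;		/* lower 4 bits */
}

int main(void)
{
	unsigned off, wid;

	decode_vsync(0x3A, &off, &wid);
	printf("vsync offset=%u width=%u\n", off, wid);	/* 3 and 10 */
	return 0;
}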
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6..e1a9598 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@
 		buf[1] = '?';
 		buf[2] = '?';
 		buf[3] = '\0';
+		return 0;
 	}
 	p = dp->controller;
 	prop = &p->layout;
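
The chmc.c hunk adds an early "return 0;" so that, once the placeholder "??"
string has been written in the error branch, the function does not fall through
into code that assumes the lookup succeeded. A minimal sketch of the pattern;
the names are illustrative, not the actual chmc.c symbols:

#include <stddef.h>
#include <stdio.h>

struct controller {
	const char *layout;
};

/* Copy a label into buf, or "??" when no controller is attached. */
static int get_label(const struct controller *ctrl, char *buf, size_t len)
{
	if (!ctrl) {
		snprintf(buf, len, "??");
		/* Early return: falling through would dereference
		 * a NULL ctrl below. */
		return 0;
	}
	snprintf(buf, len, "%s", ctrl->layout);
	return 0;
}

int main(void)
{
	char buf[8];

	get_label(NULL, buf, sizeof(buf));
	puts(buf);	/* prints "??" */
	return 0;
}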
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 01e3cff..e246642 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1237,8 +1237,9 @@
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		md_done_sync(mddev, r1_bio->sectors, uptodate);
+		sector_t s = r1_bio->sectors;
 		put_buf(r1_bio);
+		md_done_sync(mddev, s, uptodate);
 	}
 }
 
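The raid1.c change reorders the completion path: put_buf() may free r1_bio, so
r1_bio->sectors must be read into a local before the buffer is released, and
md_done_sync() called afterwards with the saved value. A minimal sketch of the
use-after-free this prevents, with hypothetical types standing in for the md
structures:

#include <stdlib.h>

struct bio_record {
	unsigned long sectors;
};

static void report_done(unsigned long sectors)
{
	(void)sectors;	/* stand-in for md_done_sync() */
}

static void complete(struct bio_record *r)
{
	/* Save everything still needed from *r ... */
	unsigned long s = r->sectors;

	/* ... because freeing invalidates the pointer ... */
	free(r);

	/* ... and only then report completion.  Reading r->sectors
	 * here would be a use-after-free. */
	report_done(s);
}

int main(void)
{
	struct bio_record *r = malloc(sizeof(*r));

	if (!r)
		return 1;
	r->sectors = 128;
	complete(r);
	return 0;
}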
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6736d6d..7301631 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@
 	/* for reconstruct, we always reschedule after a read.
 	 * for resync, only after all reads
 	 */
+	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
 	    atomic_dec_and_test(&r10_bio->remaining)) {
 		/* we have read all the blocks,
@@ -1243,7 +1244,6 @@
 		 */
 		reschedule_retry(r10_bio);
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
@@ -1264,11 +1264,13 @@
 
 	update_head_pos(i, r10_bio);
 
+	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 	while (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (r10_bio->master_bio == NULL) {
 			/* the primary of several recovery bios */
-			md_done_sync(mddev, r10_bio->sectors, 1);
+			sector_t s = r10_bio->sectors;
 			put_buf(r10_bio);
+			md_done_sync(mddev, s, 1);
 			break;
 		} else {
 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@
 			r10_bio = r10_bio2;
 		}
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*
@@ -1749,8 +1750,6 @@
 	if (!go_faster && conf->nr_waiting)
 		msleep_interruptible(1000);
 
-	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
 	 * have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;
 
+		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 				       &sync_blocks, mddev->degraded) &&
 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2010,13 +2011,13 @@
 	/* There is nowhere to write, so all non-sync
 	 * drives must be failed, so try the next chunk...
 	 */
-	{
-	sector_t sec = max_sector - sector_nr;
-	sectors_skipped += sec;
+	if (sector_nr + max_sync < max_sector)
+		max_sector = sector_nr + max_sync;
+
+	sectors_skipped += (max_sector - sector_nr);
 	chunks_skipped ++;
 	sector_nr = max_sector;
 	goto skipped;
-	}
 }
 
 static int run(mddev_t *mddev)
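
The raid10.c hunks apply the same discipline in several places:
rdev_dec_pending() is moved ahead of the calls that can drop the last reference
to r10_bio, r10_bio->sectors is saved into a local before put_buf() frees the
bio, bitmap_cond_end_sync() moves into the resync-only path, and the final
"nowhere to write" path now bounds the skip by max_sync so sectors_skipped
cannot overshoot. A minimal sketch of that clamping arithmetic, with
illustrative names:

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Bound how far a skipped resync window may advance: never move past
 * sector_nr + max_sync, even if the device end (max_sector) is
 * further away.
 */
static sector_t clamp_skip(sector_t sector_nr, sector_t max_sector,
			   sector_t max_sync)
{
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	return max_sector - sector_nr;	/* sectors actually skipped */
}

int main(void)
{
	/* Window of 128 sectors, device end 1000 sectors away. */
	printf("%llu\n", clamp_skip(0, 1000, 128));	/* 128 */
	printf("%llu\n", clamp_skip(900, 1000, 128));	/* 100 */
	return 0;
}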
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b0ee86c..ab13ff2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -148,7 +148,7 @@
 		pci_unmap_single(dev,
 				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
 				pci_unmap_len(&txq->cmd[index]->meta, len),
-				PCI_DMA_TODEVICE);
+				PCI_DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++) {
@@ -964,7 +964,7 @@
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
 				    out_cmd, sizeof(struct iwl_cmd),
-				    PCI_DMA_TODEVICE);
+				    PCI_DMA_BIDIRECTIONAL);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
 	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
@@ -1115,7 +1115,7 @@
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
 
 	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-				   len, PCI_DMA_TODEVICE);
+				   len, PCI_DMA_BIDIRECTIONAL);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
@@ -1212,7 +1212,7 @@
 	pci_unmap_single(priv->pci_dev,
 		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
 		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
-		PCI_DMA_TODEVICE);
+		PCI_DMA_BIDIRECTIONAL);
 
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
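
The iwl-tx.c hunks switch every command-buffer mapping from PCI_DMA_TODEVICE to
PCI_DMA_BIDIRECTIONAL: the device does not only read the Tx command from this
buffer, it can also write back into it, so a to-device-only mapping risks the
CPU seeing stale data on non-coherent platforms. A short sketch of the rule
against the legacy pci_* DMA API this driver uses; the wrapper names are
hypothetical:

#include <linux/pci.h>

/*
 * A buffer the device both reads and writes must be mapped
 * bidirectionally; PCI_DMA_TODEVICE tells the DMA layer the device
 * never writes it, which is wrong here.
 */
static dma_addr_t map_cmd(struct pci_dev *pdev, void *cmd, size_t len)
{
	return pci_map_single(pdev, cmd, len, PCI_DMA_BIDIRECTIONAL);
}

static void unmap_cmd(struct pci_dev *pdev, dma_addr_t addr, size_t len)
{
	/* The direction must match the one used at map time. */
	pci_unmap_single(pdev, addr, len, PCI_DMA_BIDIRECTIONAL);
}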
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a..26c536b 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -330,6 +330,14 @@
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
@@ -601,7 +641,8 @@
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -649,13 +694,13 @@
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
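
The dmar.c changes turn qi_submit_sync() into a function that can fail: while
waiting for the wait descriptor's status to flip to QI_DONE, qi_check_fault()
polls DMAR_FSTS_REG, and on an Invalidation Queue Error whose head points at
our descriptor it replaces the bad descriptor with the wait descriptor, clears
DMA_FSTS_IQE, and returns -EINVAL, which then propagates out through
qi_flush_context() and qi_flush_iotlb(). The file also gains a guard against
zero-length ACPI sub-tables, which would otherwise loop forever. A simplified
sketch of the poll-with-fault-check loop shape, not the real register access:

#include <errno.h>

enum { QI_DONE = 1 };

/* Returns -EINVAL if the hardware flagged our descriptor as bad. */
static int check_fault(volatile int *fault_reg, int index, int head)
{
	if (*fault_reg && head == index)
		return -EINVAL;
	return 0;
}

/*
 * Spin until the status slot reaches QI_DONE, but re-check the fault
 * state on every iteration so a faulted descriptor cannot make the
 * caller wait forever.
 */
static int wait_done(volatile int *status, volatile int *fault_reg,
		     int index, int head)
{
	int rc = 0;

	while (*status != QI_DONE) {
		rc = check_fault(fault_reg, index, head);
		if (rc)
			break;
		/* cpu_relax() here in the real code */
	}
	*status = QI_DONE;	/* reclaim the slot either way */
	return rc;
}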
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b..45effc5 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
-
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
-	return 0;
+
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
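
With qi_submit_sync() now returning an error, intr_remapping.c stops swallowing
failures: qi_flush_iec() returns the submission result, and modify_irte(),
flush_irte(), and free_irte() pass it up instead of returning a hard-coded 0.
A minimal sketch of the propagate-don't-swallow pattern, with illustrative
names:

#include <errno.h>

static int submit(int desc)
{
	/* Pretend submission fails for negative descriptors. */
	return desc < 0 ? -EINVAL : 0;
}

static int flush(int index)
{
	/* Before: submit(index); return 0;  -- the error was lost.
	 * After: hand the caller the real result. */
	return submit(index);
}

static int modify_entry(int index)
{
	int rc;

	rc = flush(index);	/* do the work under the lock ... */
	return rc;		/* ... and report what actually happened */
}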
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c4f6c10..d2e3cbf 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -194,6 +194,7 @@
 /* FSTS_REG */
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -328,7 +329,7 @@
 			  unsigned int size_order, u64 type,
 			  int non_present_entry_flush);
 
-extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
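
The header change completes the interface: DMA_FSTS_IQE names bit 4 of the
fault status register, and the qi_submit_sync() prototype now returns int, so
callers can check the result. A hedged sketch of the intended call shape; the
wrapper below is hypothetical, not a function in this patch:

#include <linux/kernel.h>
#include <linux/intel-iommu.h>

/* Submit one invalidation descriptor and surface any queue error. */
static int flush_one(struct intel_iommu *iommu, struct qi_desc *desc)
{
	int rc = qi_submit_sync(desc, iommu);

	if (rc)
		printk(KERN_WARNING "queued invalidation failed: %d\n", rc);
	return rc;
}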