ioat: add some dev_dbg() calls

Provide some output for debugging the driver: add dev_dbg() and
dump_desc_dbg() statements at key points (descriptor submission,
allocation, cleanup, and channel reset), and tag each software
descriptor with an id via set_desc_id() so the debug output can
identify individual descriptors.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 696d4de..edf4f5e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -134,6 +134,7 @@
 	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
 
 #ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
 	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
@@ -167,6 +168,8 @@
 {
 	void __iomem *reg_base = ioat->base.reg_base;
 
+	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+		__func__, ioat->pending);
 	ioat->pending = 0;
 	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
 }
@@ -251,6 +254,7 @@
 	if (!ioat->used_desc.prev)
 		return;
 
+	dev_dbg(to_dev(chan), "%s\n", __func__);
 	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
 	chansts = (chan->completion_virt->low
 					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
@@ -382,6 +386,7 @@
 		cookie = 1;
 	c->cookie = cookie;
 	tx->cookie = cookie;
+	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
 	/* write address into NextDescriptor field of last desc in chain */
 	first = to_ioat_desc(tx->tx_list.next);
@@ -390,6 +395,8 @@
 	wmb();
 	chain_tail->hw->next = first->txd.phys;
 	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
+	dump_desc_dbg(ioat, chain_tail);
+	dump_desc_dbg(ioat, first);
 
 	ioat->pending += desc->tx_cnt;
 	if (ioat->pending >= ioat_pending_level)
@@ -429,6 +436,7 @@
 	desc_sw->txd.tx_submit = ioat1_tx_submit;
 	desc_sw->hw = desc;
 	desc_sw->txd.phys = phys;
+	set_desc_id(desc_sw, -1);
 
 	return desc_sw;
 }
@@ -474,6 +482,7 @@
 			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
 			break;
 		}
+		set_desc_id(desc, i);
 		list_add_tail(&desc->node, &tmp_list);
 	}
 	spin_lock_bh(&ioat->desc_lock);
@@ -495,6 +504,8 @@
 
 	tasklet_enable(&chan->cleanup_task);
 	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
+	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+		__func__, ioat->desccount);
 	return ioat->desccount;
 }
 
@@ -527,8 +538,10 @@
 	mdelay(100);
 
 	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->used_desc, node) {
+	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+			__func__, desc_id(desc));
+		dump_desc_dbg(ioat, desc);
 		in_use_descs++;
 		list_del(&desc->node);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
@@ -585,7 +598,8 @@
 			return NULL;
 		}
 	}
-
+	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+		__func__, desc_id(new));
 	prefetch(new->hw);
 	return new;
 }
@@ -630,6 +644,7 @@
 			async_tx_ack(&desc->txd);
 			next = ioat1_dma_get_next_descriptor(ioat);
 			hw->next = next ? next->txd.phys : 0;
+			dump_desc_dbg(ioat, desc);
 			desc = next;
 		} else
 			hw->next = 0;
@@ -652,6 +667,7 @@
 	list_splice(&chain, &desc->txd.tx_list);
 	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
 	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat, desc);
 
 	return &desc->txd;
 }
@@ -707,6 +723,9 @@
 	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
+	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
 	if ((chan->completion_virt->full
 		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
 				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
@@ -758,6 +777,8 @@
 		return;
 	}
 
+	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+		 __func__, phys_complete);
 	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
 		tx = &desc->txd;
 		/*
@@ -765,6 +786,7 @@
 		 * due to exceeding xfercap, perhaps. If so, only the
 		 * last one will have a cookie, and require unmapping.
 		 */
+		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			cookie = tx->cookie;
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
@@ -848,6 +870,7 @@
 	async_tx_ack(&desc->txd);
 	hw->next = 0;
 	list_add_tail(&desc->node, &ioat->used_desc);
+	dump_desc_dbg(ioat, desc);
 
 	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);