SGISEEQ: use cached memory access to make driver work on IP28

- Use inline functions for dma_sync_* instead of macros
- Add a Kconfig change to make selecting similar SGI boxes easier

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
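---
Note for reviewers: the new inline helpers encode a sync-before-read /
sync-after-write discipline for descriptors kept in cached memory, which
is what lets the driver run on the non-coherent IP28. A hypothetical
minimal sketch of that discipline follows; my_desc and my_recycle_desc
are illustrations only, not part of sgiseeq.c (HPCDMA_OWN and
RCNTINFO_INIT come from the driver and its HPC3 headers):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

struct my_desc {
	u32 cntinfo;	/* status/ownership word shared with the HPC3 DMA engine */
};

static void my_recycle_desc(struct net_device *dev, struct my_desc *d)
{
	/* Make the device's latest writes visible to the CPU. */
	dma_cache_sync(dev->dev.parent, d, sizeof(*d), DMA_FROM_DEVICE);

	if (d->cntinfo & HPCDMA_OWN)
		return;		/* device still owns the descriptor */

	/* ... process the completed buffer here ... */

	d->cntinfo = RCNTINFO_INIT;	/* hand ownership back to the device */

	/* Flush the CPU's update so the device observes it. */
	dma_cache_sync(dev->dev.parent, d, sizeof(*d), DMA_TO_DEVICE);
}

This mirrors how sgiseeq_rx() brackets its descriptor accesses with
dma_sync_desc_cpu()/dma_sync_desc_dev() in the hunks below.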
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 3145ca1..c69bb8b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -56,14 +56,6 @@
 				  (dma_addr_t)((unsigned long)(v) -            \
 					       (unsigned long)((sp)->rx_desc)))
 
-#define DMA_SYNC_DESC_CPU(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
  * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
  */
@@ -116,6 +108,18 @@
 	spinlock_t tx_lock;
 };
 
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_TO_DEVICE);
+}
+
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
 	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
@@ -184,7 +188,7 @@
 	/* Setup tx ring. */
 	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
 		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
 	}
 
 	/* And now the rx ring. */
@@ -203,10 +207,10 @@
 			sp->rx_desc[i].rdma.pbuf = dma_addr;
 		}
 		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
 	}
 	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
 	return 0;
 }
 
@@ -341,7 +345,7 @@
 
 	/* Service every received packet. */
 	rd = &sp->rx_desc[sp->rx_new];
-	DMA_SYNC_DESC_CPU(dev, rd);
+	dma_sync_desc_cpu(dev, rd);
 	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
 		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
 		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
@@ -397,16 +401,16 @@
 		/* Return the entry to the ring pool. */
 		rd->rdma.cntinfo = RCNTINFO_INIT;
 		sp->rx_new = NEXT_RX(sp->rx_new);
-		DMA_SYNC_DESC_DEV(dev, rd);
+		dma_sync_desc_dev(dev, rd);
 		rd = &sp->rx_desc[sp->rx_new];
-		DMA_SYNC_DESC_CPU(dev, rd);
+		dma_sync_desc_cpu(dev, rd);
 	}
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
 	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	rx_maybe_restart(sp, hregs, sregs);
 }
 
@@ -433,12 +437,12 @@
 	 * is not active!
 	 */
 	td = &sp->tx_desc[i];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
 	      (HPCDMA_XIU | HPCDMA_ETXD)) {
 		i = NEXT_TX(i);
 		td = &sp->tx_desc[i];
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 	}
 	if (td->tdma.cntinfo & HPCDMA_XIU) {
 		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
@@ -470,7 +474,7 @@
 	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
 		td = &sp->tx_desc[j];
 
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
 			break;
 		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
@@ -488,7 +492,7 @@
 			dev_kfree_skb_any(td->skb);
 			td->skb = NULL;
 		}
-		DMA_SYNC_DESC_DEV(dev, td);
+		dma_sync_desc_dev(dev, td);
 	}
 }
 
@@ -598,7 +602,7 @@
 	dev->stats.tx_bytes += len;
 	entry = sp->tx_new;
 	td = &sp->tx_desc[entry];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 
 	/* Create entry.  There are so many races with adding a new
 	 * descriptor to the chain:
@@ -618,14 +622,14 @@
 				       len, DMA_TO_DEVICE);
 	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
 	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
-	DMA_SYNC_DESC_DEV(dev, td);
+	dma_sync_desc_dev(dev, td);
 	if (sp->tx_old != sp->tx_new) {
 		struct sgiseeq_tx_desc *backend;
 
 		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
-		DMA_SYNC_DESC_CPU(dev, backend);
+		dma_sync_desc_cpu(dev, backend);
 		backend->tdma.cntinfo &= ~HPCDMA_EOX;
-		DMA_SYNC_DESC_DEV(dev, backend);
+		dma_sync_desc_dev(dev, backend);
 	}
 	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
 
@@ -681,11 +685,11 @@
 	while (i < (nbufs - 1)) {
 		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].tdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static inline void setup_rx_ring(struct net_device *dev,
@@ -698,12 +702,12 @@
 	while (i < (nbufs - 1)) {
 		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].rdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].rdma.pbuf = 0;
 	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static int __init sgiseeq_probe(struct platform_device *pdev)
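
A side note on the macro-to-inline conversion itself: the removed
DMA_SYNC_DESC_* macros cast their addr argument to (void *), so a wrong
argument type compiled silently, while the inline functions get proper
type checking on both parameters. A hypothetical illustration (example()
is not in the driver):

static void example(struct net_device *dev, unsigned long bogus)
{
	DMA_SYNC_DESC_CPU(dev, bogus);	/* old macro: integer silently cast to void * */
	dma_sync_desc_cpu(dev, bogus);	/* inline: compiler warns about making a
					 * pointer from an integer without a cast */
}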