ARC: dma: ioremap: use phys_addr_t consistently in code paths

This is needed to support DMA in physical memory beyond 4GB with PAE40.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index d6b30f6..608c143 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -65,13 +65,13 @@
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
 	if (need_kvaddr) {
-		kvaddr = ioremap_nocache((unsigned long)paddr, size);
+		kvaddr = ioremap_nocache(paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
 			return NULL;
 		}
 	} else {
-		kvaddr = (void *)paddr;
+		kvaddr = (void *)(u32)paddr;
 	}
 
 	/*
@@ -85,7 +85,7 @@
 	 * will be optimized as a separate commit
 	 */
 	if (need_coh)
-		dma_cache_wback_inv((unsigned long)paddr, size);
+		dma_cache_wback_inv(paddr, size);
 
 	return kvaddr;
 }
@@ -110,7 +110,7 @@
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
@@ -124,7 +124,7 @@
 		dma_cache_wback_inv(paddr, size);
 		break;
 	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
 	}
 }
 
@@ -132,7 +132,7 @@
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	unsigned long paddr = page_to_phys(page) + offset;
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	_dma_cache_sync(paddr, size, dir);
 	return (dma_addr_t)paddr;
 }