[POWERPC] Allow hooking of PCI MMIO & PIO accessors on 64 bits

This patch reworks the way iSeries hooks on PCI IO operations (both MMIO
and PIO) and provides a generic way for other platforms to do the same
(we need that for various other platforms).
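For illustration, a platform can now install the hooks roughly like
this (a minimal sketch; the myplat_* names are hypothetical, only the
ppc_md.ioremap/ppc_md.iounmap hooks and the __ioremap()/__iounmap()
fallbacks come from this patch):

    static void __iomem *myplat_ioremap(unsigned long addr,
                                        unsigned long size,
                                        unsigned long flags)
    {
        /* do platform-specific translation or tracking here,
         * or simply fall back to the generic implementation */
        return __ioremap(addr, size, flags);
    }

    static void myplat_iounmap(void __iomem *token)
    {
        __iounmap(token);
    }

    /* in the platform setup code: */
    ppc_md.ioremap = myplat_ioremap;
    ppc_md.iounmap = myplat_iounmap;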

While reworking the IO ops, I ended up doing some spring cleaning in
io.h and eeh.h, which I may want to split into 2 or 3 patches (among
other things, eeh.h had a lot of useless stuff in it).

A side effect is that EEH for PIO should work now (it used to pass IO
ports down to the eeh address check functions, which is bogus).

Also new are the MMIO "repeat" ops, which other archs like ARM already
had and which we now have too: readsb, readsw, readsl, writesb, writesw,
writesl.
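They follow the usual semantics of transferring a whole buffer to or
from a single register; for example (regs, RX_FIFO, TX_DATA and buf are
hypothetical, shown only to illustrate the calling convention):

    /* drain 'count' 16-bit words from a device FIFO into buf */
    readsw(regs + RX_FIFO, buf, count);

    /* push 'count' bytes from buf out through a data register */
    writesb(regs + TX_DATA, buf, count);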

In the long run, I might also make EEH use the hooks instead of
wrapping at the toplevel, which would make things even cleaner and
relegate EEH completely to platforms/pseries, but we have to measure the
performance impact there first (though it's really only on MMIO reads).

Since I also need to hook on ioremap, I shuffled the functions a bit
there. I introduced ioremap_flags(), to be used by drivers that want to
pass explicit flags to ioremap (and it can be hooked). The old
__ioremap() is still there as a low-level function and cannot be hooked,
so drivers that use it should migrate unless they know they want the
low-level version.
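Usage is like plain ioremap() with the page flags made explicit, e.g.
(hypothetical addr/size; note that ioremap() itself is equivalent to
passing _PAGE_NO_CACHE | _PAGE_GUARDED):

    /* cache-inhibited but not guarded mapping of device memory */
    void __iomem *regs = ioremap_flags(addr, size, _PAGE_NO_CACHE);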

The patch "arch provides generic iomap missing accessors" (should be
number 4 in this series) is a pre-requisite to provide full iomap
API support with this patch.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4a..e9b2184 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -129,22 +129,12 @@
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
 void __iomem * __ioremap(unsigned long addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,6 +168,25 @@
 	return ret;
 }
 
+
+void __iomem * ioremap(unsigned long addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
 int __ioremap_explicit(unsigned long pa, unsigned long ea,
@@ -235,13 +244,10 @@
  *
  * XXX	what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 	
@@ -250,6 +256,14 @@
 	im_free(addr);
 }
 
+void iounmap(void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {