sh: migrate to arch/sh/include/

This follows the sparc changes in commit a439fe51a1f8eb087c22dd24d69cebae4a3addac.

Most of the moving about was done following Sam's directions at:

http://marc.info/?l=linux-sh&m=121724823706062&w=2

with subsequent hacking and fixups entirely my fault.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
new file mode 100644
index 0000000..02df18e
--- /dev/null
+++ b/arch/sh/include/asm/cache.h
@@ -0,0 +1,51 @@
+/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
+ *
+ * arch/sh/include/asm/cache.h
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002, 2003 Paul Mundt
+ */
+#ifndef __ASM_SH_CACHE_H
+#define __ASM_SH_CACHE_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <cpu/cache.h>
+
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+#ifndef __ASSEMBLY__
+struct cache_info {
+	unsigned int ways;		/* Number of cache ways */
+	unsigned int sets;		/* Number of cache sets */
+	unsigned int linesz;		/* Cache line size (bytes) */
+
+	unsigned int way_size;		/* sets * line size */
+
+	/*
+	 * way_incr is the address offset for accessing the next way
+	 * in memory-mapped cache array operations.
+	 */
+	unsigned int way_incr;
+	unsigned int entry_shift;
+	unsigned int entry_mask;
+
+	/*
+	 * Mask selecting the address bits which overlap between
+	 * 1. those used to select the cache set during indexing, and
+	 * 2. those in the physical page number.
+	 */
+	unsigned int alias_mask;
+
+	unsigned int n_aliases;		/* Number of aliases */
+
+	unsigned long flags;
+};
+
+int __init detect_cpu_and_cache_system(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_CACHE_H */
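
For reference, the derived fields above (way_size, alias_mask, n_aliases)
follow from the basic cache geometry. Below is a minimal userspace sketch
of that derivation, modelled on the kind of compute_alias() helper the sh
cpu init code uses; the struct, helper, and numbers here are illustrative
assumptions rather than part of this header. The values approximate an
SH-4-style data cache: 16 KiB, direct-mapped, 32-byte lines, 4 KiB pages.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Mirror of the geometry/alias fields from struct cache_info above. */
struct cache_geom {
	unsigned int ways, sets, linesz;
	unsigned int way_size;
	unsigned int entry_shift;
	unsigned int alias_mask;
	unsigned int n_aliases;
};

static void compute_alias(struct cache_geom *c)
{
	c->way_size = c->sets * c->linesz;	/* sets * line size */
	/*
	 * Set-index bits that also fall inside the page frame number:
	 * these are the bits in which virtual aliases can differ.
	 */
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* Hypothetical SH-4-like dcache: 1 way, 512 sets of 32 bytes. */
	struct cache_geom dc = {
		.ways = 1, .sets = 512, .linesz = 32, .entry_shift = 5,
	};

	compute_alias(&dc);
	/* Prints: way_size=16384 alias_mask=0x3000 n_aliases=4 */
	printf("way_size=%u alias_mask=%#x n_aliases=%u\n",
	       dc.way_size, dc.alias_mask, dc.n_aliases);
	return 0;
}

With these numbers the set index spans bits [13:5] while the page offset
covers bits [11:0], so bits [13:12] overlap the page frame number, giving
four possible cache aliases per physical page.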