[MIPS] Revert "[MIPS] tlbex.c: Cleanup __init usage."

This reverts commit aaf76a3245c02faba51c96b9a340c14d6bb0dcc0.

As requested by Franck Bui-Huu <fbuihuu@gmail.com>.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 01b0961..a61246d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -66,7 +66,7 @@
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __init m4kc_tlbp_war(void)
+static __init int __attribute__((unused)) m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 	       (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -140,7 +140,7 @@
 	 | (e) << RE_SH						\
 	 | (f) << FUNC_SH)
 
-static struct insn insn_table[] __initdata = {
+static __initdata struct insn insn_table[] = {
 	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
 	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -193,7 +193,7 @@
 
 #undef M
 
-static u32 __init build_rs(u32 arg)
+static __init u32 build_rs(u32 arg)
 {
 	if (arg & ~RS_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -201,7 +201,7 @@
 	return (arg & RS_MASK) << RS_SH;
 }
 
-static u32 __init build_rt(u32 arg)
+static __init u32 build_rt(u32 arg)
 {
 	if (arg & ~RT_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -209,7 +209,7 @@
 	return (arg & RT_MASK) << RT_SH;
 }
 
-static u32 __init build_rd(u32 arg)
+static __init u32 build_rd(u32 arg)
 {
 	if (arg & ~RD_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -217,7 +217,7 @@
 	return (arg & RD_MASK) << RD_SH;
 }
 
-static u32 __init build_re(u32 arg)
+static __init u32 build_re(u32 arg)
 {
 	if (arg & ~RE_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -225,7 +225,7 @@
 	return (arg & RE_MASK) << RE_SH;
 }
 
-static u32 __init build_simm(s32 arg)
+static __init u32 build_simm(s32 arg)
 {
 	if (arg > 0x7fff || arg < -0x8000)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -233,7 +233,7 @@
 	return arg & 0xffff;
 }
 
-static u32 __init build_uimm(u32 arg)
+static __init u32 build_uimm(u32 arg)
 {
 	if (arg & ~IMM_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -241,7 +241,7 @@
 	return arg & IMM_MASK;
 }
 
-static u32 __init build_bimm(s32 arg)
+static __init u32 build_bimm(s32 arg)
 {
 	if (arg > 0x1ffff || arg < -0x20000)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -252,7 +252,7 @@
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static u32 __init build_jimm(u32 arg)
+static __init u32 build_jimm(u32 arg)
 {
 	if (arg & ~((JIMM_MASK) << 2))
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -260,7 +260,7 @@
 	return (arg >> 2) & JIMM_MASK;
 }
 
-static u32 __init build_func(u32 arg)
+static __init u32 build_func(u32 arg)
 {
 	if (arg & ~FUNC_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -268,7 +268,7 @@
 	return arg & FUNC_MASK;
 }
 
-static u32 __init build_set(u32 arg)
+static __init u32 build_set(u32 arg)
 {
 	if (arg & ~SET_MASK)
 		printk(KERN_WARNING "TLB synthesizer field overflow\n");
@@ -315,69 +315,69 @@
 }
 
 #define I_u1u2u3(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b, unsigned int c)			\
 	{							\
 		build_insn(buf, insn##op, a, b, c);		\
 	}
 
 #define I_u2u1u3(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b, unsigned int c)			\
 	{							\
 		build_insn(buf, insn##op, b, a, c);		\
 	}
 
 #define I_u3u1u2(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b, unsigned int c)			\
 	{							\
 		build_insn(buf, insn##op, b, c, a);		\
 	}
 
 #define I_u1u2s3(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b, signed int c)			\
 	{							\
 		build_insn(buf, insn##op, a, b, c);		\
 	}
 
 #define I_u2s3u1(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	signed int b, unsigned int c)			\
 	{							\
 		build_insn(buf, insn##op, c, a, b);		\
 	}
 
 #define I_u2u1s3(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b, signed int c)			\
 	{							\
 		build_insn(buf, insn##op, b, a, c);		\
 	}
 
 #define I_u1u2(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	unsigned int b)					\
 	{							\
 		build_insn(buf, insn##op, a, b);		\
 	}
 
 #define I_u1s2(op)						\
-	static inline void i##op(u32 **buf, unsigned int a,	\
+	static inline void __init i##op(u32 **buf, unsigned int a,	\
 	 	signed int b)					\
 	{							\
 		build_insn(buf, insn##op, a, b);		\
 	}
 
 #define I_u1(op)						\
-	static inline void i##op(u32 **buf, unsigned int a)	\
+	static inline void __init i##op(u32 **buf, unsigned int a)	\
 	{							\
 		build_insn(buf, insn##op, a);			\
 	}
 
 #define I_0(op)							\
-	static inline void i##op(u32 **buf)		\
+	static inline void __init i##op(u32 **buf)		\
 	{							\
 		build_insn(buf, insn##op);			\
 	}
@@ -457,7 +457,7 @@
 	enum label_id lab;
 };
 
-static void __init build_label(struct label **lab, u32 *addr,
+static __init void build_label(struct label **lab, u32 *addr,
 			       enum label_id l)
 {
 	(*lab)->addr = addr;
@@ -526,34 +526,34 @@
 #define i_ehb(buf) i_sll(buf, 0, 0, 3)
 
 #ifdef CONFIG_64BIT
-static int __init __maybe_unused in_compat_space_p(long addr)
+static __init int __maybe_unused in_compat_space_p(long addr)
 {
 	/* Is this address in 32bit compat space? */
 	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
 }
 
-static int __init __maybe_unused rel_highest(long val)
+static __init int __maybe_unused rel_highest(long val)
 {
 	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static int __init __maybe_unused rel_higher(long val)
+static __init int __maybe_unused rel_higher(long val)
 {
 	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
 }
 #endif
 
-static int __init rel_hi(long val)
+static __init int rel_hi(long val)
 {
 	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static int __init rel_lo(long val)
+static __init int rel_lo(long val)
 {
 	return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
 
-static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
 {
 #ifdef CONFIG_64BIT
 	if (!in_compat_space_p(addr)) {
@@ -571,7 +571,7 @@
 		i_lui(buf, rs, rel_hi(addr));
 }
 
-static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs,
+static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs,
 					     long addr)
 {
 	i_LA_mostly(buf, rs, addr);
@@ -589,7 +589,7 @@
 	enum label_id lab;
 };
 
-static void __init r_mips_pc16(struct reloc **rel, u32 *addr,
+static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
 			       enum label_id l)
 {
 	(*rel)->addr = addr;
@@ -614,7 +614,7 @@
 	}
 }
 
-static void __init resolve_relocs(struct reloc *rel, struct label *lab)
+static __init void resolve_relocs(struct reloc *rel, struct label *lab)
 {
 	struct label *l;
 
@@ -624,7 +624,7 @@
 				__resolve_relocs(rel, l);
 }
 
-static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
+static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
 			       long off)
 {
 	for (; rel->lab != label_invalid; rel++)
@@ -632,7 +632,7 @@
 			rel->addr += off;
 }
 
-static void __init move_labels(struct label *lab, u32 *first, u32 *end,
+static __init void move_labels(struct label *lab, u32 *first, u32 *end,
 			       long off)
 {
 	for (; lab->lab != label_invalid; lab++)
@@ -640,7 +640,7 @@
 			lab->addr += off;
 }
 
-static void __init copy_handler(struct reloc *rel, struct label *lab,
+static __init void copy_handler(struct reloc *rel, struct label *lab,
 				u32 *first, u32 *end, u32 *target)
 {
 	long off = (long)(target - first);
@@ -651,7 +651,7 @@
 	move_labels(lab, first, end, off);
 }
 
-static int __init __maybe_unused insn_has_bdelay(struct reloc *rel,
+static __init int __maybe_unused insn_has_bdelay(struct reloc *rel,
 						       u32 *addr)
 {
 	for (; rel->lab != label_invalid; rel++) {
@@ -743,11 +743,11 @@
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __initdata;
+static __initdata u32 tlb_handler[128];
 
 /* simply assume worst case size for labels and relocs */
-static struct label labels[128] __initdata;
-static struct reloc relocs[128] __initdata;
+static __initdata struct label labels[128];
+static __initdata struct reloc relocs[128];
 
 /*
  * The R3000 TLB handler is simple.
@@ -801,7 +801,7 @@
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __initdata;
+static __initdata u32 final_handler[64];
 
 /*
  * Hazards
@@ -825,7 +825,7 @@
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static __init void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0 needs this, too.  */
@@ -849,7 +849,7 @@
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __init build_tlb_write_entry(u32 **p, struct label **l,
+static __init void build_tlb_write_entry(u32 **p, struct label **l,
 					 struct reloc **r,
 					 enum tlb_write_entry wmode)
 {
@@ -993,7 +993,7 @@
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __init
+static __init void
 build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
@@ -1054,7 +1054,7 @@
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __init
+static __init void
 build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
 			unsigned int bvaddr, unsigned int ptr)
 {
@@ -1118,7 +1118,7 @@
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __init __maybe_unused
+static __init void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -1153,7 +1153,7 @@
 
 #endif /* !CONFIG_64BIT */
 
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static __init void build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1179,7 +1179,7 @@
 	i_andi(p, ctx, ctx, mask);
 }
 
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1204,7 +1204,7 @@
 	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static __init void build_update_entries(u32 **p, unsigned int tmp,
 					unsigned int ptep)
 {
 	/*