MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code

commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  The fix in commit
5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a
good example of the nasty type of bugs that can be created with
improper use of the various __init prefixes.
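
To make the failure mode concrete, here is an illustrative sketch
(not code from that commit or from this tree): without
CONFIG_HOTPLUG_CPU, __cpuinit used to mean __init, so a __cpuinit
function is discarded once boot finishes, and any late caller jumps
into freed memory:

	#include <linux/init.h>
	#include <linux/notifier.h>

	/* Discarded after boot when CONFIG_HOTPLUG_CPU=n. */
	static void __cpuinit probe_cpu_cache(void)
	{
		/* fine for boot-time CPU bringup */
	}

	/* Notifier callbacks stay resident for the life of the kernel. */
	static int cpu_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
	{
		probe_cpu_cache();	/* BUG: may run after init memory
					 * is freed, e.g. on resume */
		return NOTIFY_OK;
	}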

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch-independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch-specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the cpuinit-related
content in linux/init.h into no-ops as early as possible, since that
will get rid of these warnings.  In any case, they are temporary and
harmless.
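
For reference, the interim no-op step amounts to something like the
following in linux/init.h (a sketch only; the real change lands in a
separate patch and the asm-side __CPUINIT definition may differ):

	/* Interim stubs: the annotations compile away, so mixed
	 * annotated/unannotated callers no longer mismatch.
	 */
	#define __cpuinit
	#define __cpuinitdata
	#define __cpuinitconst
	#define __CPUINIT	.section ".text", "ax"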

Here, we remove all of the MIPS __cpuinit usage from C code and
__CPUINIT from asm files.  MIPS is interesting in this respect
because there are also uasm users hiding behind their own renamed
versions of the __cpuinit macros; a sketch of that mapping follows.
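
The uasm indirection in question looks roughly like this (paraphrased
from arch/mips/include/asm/uasm.h; treat the exact macro set as an
approximation):

	#ifdef CONFIG_EXPORT_UASM
	#define __uasminit
	#define __uasminitdata
	#define UASM_EXPORT_SYMBOL(sym)	EXPORT_SYMBOL(sym)
	#else
	/* Unexported uasm is boot-time only, so it used to piggyback
	 * on the cpuinit sections under these renamed macros.
	 */
	#define __uasminit	__cpuinit
	#define __uasminitdata	__cpuinitdata
	#define UASM_EXPORT_SYMBOL(sym)
	#endif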

[1] https://lkml.org/lkml/2013/5/20/589

[ralf@linux-mips.org: Folded in Paul's follow-up fix.]

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb5..a0bcdbb 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@
  * Probe Octeon's caches
  *
  */
-static void __cpuinit probe_octeon(void)
+static void probe_octeon(void)
 {
 	unsigned long icache_size;
 	unsigned long dcache_size;
@@ -251,7 +251,7 @@
 	}
 }
 
-static void  __cpuinit octeon_cache_error_setup(void)
+static void octeon_cache_error_setup(void)
 {
 	extern char except_vec2_octeon;
 	set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@
  * Setup the Octeon cache flush routines
  *
  */
-void __cpuinit octeon_cache_init(void)
+void octeon_cache_init(void)
 {
 	probe_octeon();
 
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc73..2fcde0c 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
 static unsigned long icache_size, dcache_size;		/* Size in bytes */
 static unsigned long icache_lsize, dcache_lsize;	/* Size in bytes */
 
-unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
+unsigned long r3k_cache_size(unsigned long ca_flags)
 {
 	unsigned long flags, status, dummy, size;
 	volatile unsigned long *p;
@@ -61,7 +61,7 @@
 	return size * sizeof(*p);
 }
 
-unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
+unsigned long r3k_cache_lsize(unsigned long ca_flags)
 {
 	unsigned long flags, status, lsize, i;
 	volatile unsigned long *p;
@@ -90,7 +90,7 @@
 	return lsize * sizeof(*p);
 }
 
-static void __cpuinit r3k_probe_cache(void)
+static void r3k_probe_cache(void)
 {
 	dcache_size = r3k_cache_size(ST0_ISC);
 	if (dcache_size)
@@ -312,7 +312,7 @@
 	r3k_flush_dcache_range(start, start + size);
 }
 
-void __cpuinit r3k_cache_init(void)
+void r3k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813be..f749f68 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@
 	blast_dcache64_page(addr);
 }
 
-static void __cpuinit r4k_blast_dcache_page_setup(void)
+static void r4k_blast_dcache_page_setup(void)
 {
 	unsigned long  dc_lsize = cpu_dcache_line_size();
 
@@ -123,7 +123,7 @@
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
+static void r4k_blast_dcache_page_indexed_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -140,7 +140,7 @@
 void (* r4k_blast_dcache)(void);
 EXPORT_SYMBOL(r4k_blast_dcache);
 
-static void __cpuinit r4k_blast_dcache_setup(void)
+static void r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -227,7 +227,7 @@
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
-static void __cpuinit r4k_blast_icache_page_setup(void)
+static void r4k_blast_icache_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -244,7 +244,7 @@
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
+static void r4k_blast_icache_page_indexed_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -269,7 +269,7 @@
 void (* r4k_blast_icache)(void);
 EXPORT_SYMBOL(r4k_blast_icache);
 
-static void __cpuinit r4k_blast_icache_setup(void)
+static void r4k_blast_icache_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -290,7 +290,7 @@
 
 static void (* r4k_blast_scache_page)(unsigned long addr);
 
-static void __cpuinit r4k_blast_scache_page_setup(void)
+static void r4k_blast_scache_page_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -308,7 +308,7 @@
 
 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 
-static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
+static void r4k_blast_scache_page_indexed_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -326,7 +326,7 @@
 
 static void (* r4k_blast_scache)(void);
 
-static void __cpuinit r4k_blast_scache_setup(void)
+static void r4k_blast_scache_setup(void)
 {
 	unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -797,11 +797,11 @@
 	}
 }
 
-static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] = { NULL, "direct mapped", "2-way",
 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
 
-static void __cpuinit probe_pcache(void)
+static void probe_pcache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@
  * executes in KSEG1 space or else you will crash and burn badly.  You have
  * been warned.
  */
-static int __cpuinit probe_scache(void)
+static int probe_scache(void)
 {
 	unsigned long flags, addr, begin, end, pow2;
 	unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
 
-static void __cpuinit setup_scache(void)
+static void setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@
 	NXP_BARRIER();
 }
 
-static int __cpuinitdata cca = -1;
+static int cca = -1;
 
 static int __init cca_setup(char *str)
 {
@@ -1340,7 +1340,7 @@
 
 early_param("cca", cca_setup);
 
-static void __cpuinit coherency_setup(void)
+static void coherency_setup(void)
 {
 	if (cca < 0 || cca > 7)
 		cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@
 	}
 }
 
-static void __cpuinit r4k_cache_error_setup(void)
+static void r4k_cache_error_setup(void)
 {
 	extern char __weak except_vec2_generic;
 	extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@
 	}
 }
 
-void __cpuinit r4k_cache_init(void)
+void r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da27..8d909db 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@
 	}
 }
 
-void __cpuinit tx39_cache_init(void)
+void tx39_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb..15f813c 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@
 	}
 }
 
-void __cpuinit cpu_cache_init(void)
+void cpu_cache_init(void)
 {
 	if (cpu_has_3k_cache) {
 		extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887..191cf6e 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
 	 * (0x170-0x17f) are used to preserve k0, k1, and ra.
 	 */
 
-	__CPUINIT
-
 LEAF(except_vec2_sb1)
 	/*
 	 * If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@
 
 END(except_vec2_sb1)
 
-	__FINIT
-
 	LEAF(handle_vec2_sb1)
 	mfc0	k0,CP0_CONFIG
 	li	k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd58..218c2109 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@
 UASM_L_LA(_copy_pref_store)
 
 /* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];
 
 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;
 
-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
+static u32 pref_src_mode;
+static u32 pref_dst_mode;
 
-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
+static int clear_word_size;
+static int copy_word_size;
 
-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
+static int half_clear_loop_size;
+static int half_copy_loop_size;
 
-static int cache_line_size __cpuinitdata;
+static int cache_line_size;
 #define cache_line_mask() (cache_line_size - 1)
 
-static inline void __cpuinit
+static inline void
 pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
 {
 	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@
 	}
 }
 
-static void __cpuinit set_prefetch_parameters(void)
+static void set_prefetch_parameters(void)
 {
 	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
 		clear_word_size = 8;
@@ -199,7 +199,7 @@
 				      4 * copy_word_size));
 }
 
-static void __cpuinit build_clear_store(u32 **buf, int off)
+static void build_clear_store(u32 **buf, int off)
 {
 	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
 		uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@
 	}
 }
 
-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+static inline void build_clear_pref(u32 **buf, int off)
 {
 	if (off & cache_line_mask())
 		return;
@@ -240,7 +240,7 @@
 extern u32 __copy_page_start;
 extern u32 __copy_page_end;
 
-void __cpuinit build_clear_page(void)
+void build_clear_page(void)
 {
 	int off;
 	u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@
 	pr_debug("\t.set pop\n");
 }
 
-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+static void build_copy_load(u32 **buf, int reg, int off)
 {
 	if (cpu_has_64bit_gp_regs) {
 		uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@
 	}
 }
 
-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+static void build_copy_store(u32 **buf, int reg, int off)
 {
 	if (cpu_has_64bit_gp_regs) {
 		uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@
 	}
 }
 
-void __cpuinit build_copy_page(void)
+void build_copy_page(void)
 {
 	int off;
 	u32 *buf = &__copy_page_start;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed9..dc7c5a5 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@
 	.bc_inv = indy_sc_wback_invalidate
 };
 
-void __cpuinit indy_sc_init(void)
+void indy_sc_init(void)
 {
 	if (indy_sc_probe()) {
 		indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7..5d01392 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@
 	return 1;
 }
 
-int __cpuinit mips_sc_init(void)
+int mips_sc_init(void)
 {
 	int found = mips_sc_probe();
 	if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc6772..0216ed6 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@
 	.bc_inv = r5k_dma_cache_inv_sc
 };
 
-void __cpuinit r5k_sc_init(void)
+void r5k_sc_init(void)
 {
 	if (r5k_sc_probe()) {
 		r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3b..aaffbba 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@
 /*
  * This function is executed in uncached address space.
  */
-static __cpuinit void __rm7k_tc_enable(void)
+static void __rm7k_tc_enable(void)
 {
 	int i;
 
@@ -117,7 +117,7 @@
 		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
 }
 
-static __cpuinit void rm7k_tc_enable(void)
+static void rm7k_tc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_TE)
 		return;
@@ -130,7 +130,7 @@
 /*
  * This function is executed in uncached address space.
  */
-static __cpuinit void __rm7k_sc_enable(void)
+static void __rm7k_sc_enable(void)
 {
 	int i;
 
@@ -143,7 +143,7 @@
 		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
 }
 
-static __cpuinit void rm7k_sc_enable(void)
+static void rm7k_sc_enable(void)
 {
 	if (read_c0_config() & RM7K_CONF_SE)
 		return;
@@ -184,7 +184,7 @@
  * This is a probing function like the one found in c-r4k.c, we look for the
  * wrap around point with different addresses.
  */
-static __cpuinit void __probe_tcache(void)
+static void __probe_tcache(void)
 {
 	unsigned long flags, addr, begin, end, pow2;
 
@@ -226,7 +226,7 @@
 	local_irq_restore(flags);
 }
 
-void __cpuinit rm7k_sc_init(void)
+void rm7k_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed..9aca109 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@
 	}
 }
 
-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	local_flush_tlb_all();
 
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4..00b26a6 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
 
-static int __cpuinitdata ntlb;
+static int ntlb;
 static int __init set_ntlb(char *str)
 {
 	get_option(&str, &ntlb);
@@ -398,7 +398,7 @@
 
 __setup("ntlb=", set_ntlb);
 
-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	/*
 	 * You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499..6a99733 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@
 	local_irq_restore(flags);
 }
 
-static void __cpuinit probe_tlb(unsigned long config)
+static void probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
 	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
 }
 
-void __cpuinit tlb_init(void)
+void tlb_init(void)
 {
 	unsigned int config = read_c0_config();
 	unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f90..34fce2b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 	       (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@
 UASM_L_LA(_tlb_huge_update)
 #endif
 
-static int __cpuinitdata hazard_instance;
+static int hazard_instance;
 
-static void __cpuinit uasm_bgezl_hazard(u32 **p,
-					struct uasm_reloc **r,
-					int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -196,9 +194,7 @@
 	}
 }
 
-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
-				       u32 **p,
-				       int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -295,15 +291,15 @@
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
 
-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;
 
-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;
 
 static inline int __maybe_unused c0_kscratch(void)
 {
@@ -316,7 +312,7 @@
 	}
 }
 
-static int __cpuinit allocate_kscratch(void)
+static int allocate_kscratch(void)
 {
 	int r;
 	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@
 	return r;
 }
 
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
 {
 	struct work_registers r;
 
@@ -393,7 +389,7 @@
 	return r;
 }
 
-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@
 /*
  * The R3000 TLB handler is simple.
  */
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
@@ -463,7 +459,7 @@
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];
 
 /*
  * Hazards
@@ -487,7 +483,7 @@
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
@@ -511,9 +507,9 @@
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
-					 struct uasm_reloc **r,
-					 enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+				  struct uasm_reloc **r,
+				  enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -647,8 +643,8 @@
 	}
 }
 
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
-								  unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+							unsigned int reg)
 {
 	if (cpu_has_rixi) {
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 
-static __cpuinit void build_restore_pagemask(u32 **p,
-					     struct uasm_reloc **r,
-					     unsigned int tmp,
-					     enum label_id lid,
-					     int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+				   unsigned int tmp, enum label_id lid,
+				   int restore_scratch)
 {
 	if (restore_scratch) {
 		/* Reset default page size */
@@ -706,12 +700,11 @@
 	}
 }
 
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode,
-						 int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+				       struct uasm_reloc **r,
+				       unsigned int tmp,
+				       enum tlb_write_entry wmode,
+				       int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
-static void __cpuinit
+static void
 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
-		unsigned int pmd, int lid)
+		  unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
 	if (use_bbit_insns()) {
@@ -739,9 +732,8 @@
 	}
 }
 
-static __cpuinit void build_huge_update_entries(u32 **p,
-						unsigned int pte,
-						unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+				      unsigned int tmp)
 {
 	int small_sequence;
 
@@ -771,11 +763,10 @@
 	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
-static __cpuinit void build_huge_handler_tail(u32 **p,
-					      struct uasm_reloc **r,
-					      struct uasm_label **l,
-					      unsigned int pte,
-					      unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+				    struct uasm_label **l,
+				    unsigned int pte,
+				    unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __cpuinit
+static void
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
@@ -886,7 +877,7 @@
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __cpuinit
+static void
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 			unsigned int bvaddr, unsigned int ptr,
 			enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __cpuinit __maybe_unused
+static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@
 
 #endif /* !CONFIG_64BIT */
 
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
-					unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	/*
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@
 	int restore_scratch;
 };
 
-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 			       struct uasm_reloc **r, unsigned int tmp,
 			       unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@
  */
 #define MIPS64_REFILL_INSNS 32
 
-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
 	struct uasm_label *l = labels;
@@ -1462,7 +1452,7 @@
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
 
-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
@@ -1513,7 +1503,7 @@
 }
 #endif
 
-static void __cpuinit
+static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@
 #endif
 }
 
-static void __cpuinit
+static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 	unsigned int mode)
 {
@@ -1593,7 +1583,7 @@
  * the page table where this PTE is located, PTE will be re-loaded
  * with it's original value.
  */
-static void __cpuinit
+static void
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  int pte, int ptr, int scratch, enum label_id lid)
 {
@@ -1621,7 +1611,7 @@
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1634,7 +1624,7 @@
  * Check if PTE can be written to, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __cpuinit
+static void
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, int scratch,
 		   enum label_id lid)
@@ -1654,7 +1644,7 @@
 /* Make PTE writable, update software status bits as well, then store
  * at PTR.
  */
-static void __cpuinit
+static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1668,7 +1658,7 @@
  * Check if PTE can be modified, if not branch to LABEL. Regardless
  * restore PTE with value from PTR when done.
  */
-static void __cpuinit
+static void
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, int scratch,
 		     enum label_id lid)
@@ -1697,7 +1687,7 @@
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
  * Then it returns.
  */
-static void __cpuinit
+static void
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@
  * may have the probe fail bit set as a result of a trap on a
  * kseg2 access, i.e. without refill.  Then it returns.
  */
-static void __cpuinit
+static void
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 			     struct uasm_reloc **r, unsigned int pte,
 			     unsigned int tmp)
@@ -1731,7 +1721,7 @@
 	uasm_i_rfe(p); /* branch delay */
 }
 
-static void __cpuinit
+static void
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 				   unsigned int ptr)
 {
@@ -1751,7 +1741,7 @@
 	uasm_i_tlbp(p); /* load delay */
 }
 
-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@
 	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1813,7 +1803,7 @@
 	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static struct work_registers __cpuinit
+static struct work_registers
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r)
 {
@@ -1884,7 +1874,7 @@
 	return wr;
 }
 
-static void __cpuinit
+static void
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r, unsigned int tmp,
 				   unsigned int ptr)
@@ -1902,7 +1892,7 @@
 #endif
 }
 
-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@
 	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@
 	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@
 	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
 }
 
-static void __cpuinit flush_tlb_handlers(void)
+static void flush_tlb_handlers(void)
 {
 	local_flush_icache_range((unsigned long)handle_tlbl,
 			   (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@
 #endif
 }
 
-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
 {
 	/*
 	 * The refill handler is generated per-CPU, multi-node systems
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 162ee6d..060000f 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -49,7 +49,7 @@
 
 #include "uasm.c"
 
-static struct insn insn_table_MM[] __uasminitdata = {
+static struct insn insn_table_MM[] = {
 	{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
 	{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
 	{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
@@ -118,7 +118,7 @@
 
 #undef M
 
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
 {
 	WARN(arg > 0xffff || arg < -0x10000,
 	     KERN_WARNING "Micro-assembler field overflow\n");
@@ -128,7 +128,7 @@
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
 }
 
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
 {
 
 	WARN(arg & ~((JIMM_MASK << 2) | 1),
@@ -141,7 +141,7 @@
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
  */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
 {
 	struct insn *ip = NULL;
 	unsigned int i;
@@ -199,7 +199,7 @@
 	(*buf)++;
 }
 
-static inline void __uasminit
+static inline void
 __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8f..0c72458 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -49,7 +49,7 @@
 
 #include "uasm.c"
 
-static struct insn insn_table[] __uasminitdata = {
+static struct insn insn_table[] = {
 	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
 	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
@@ -119,7 +119,7 @@
 
 #undef M
 
-static inline __uasminit u32 build_bimm(s32 arg)
+static inline u32 build_bimm(s32 arg)
 {
 	WARN(arg > 0x1ffff || arg < -0x20000,
 	     KERN_WARNING "Micro-assembler field overflow\n");
@@ -129,7 +129,7 @@
 	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
 }
 
-static inline __uasminit u32 build_jimm(u32 arg)
+static inline u32 build_jimm(u32 arg)
 {
 	WARN(arg & ~(JIMM_MASK << 2),
 	     KERN_WARNING "Micro-assembler field overflow\n");
@@ -141,7 +141,7 @@
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
  */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+static void build_insn(u32 **buf, enum opcode opc, ...)
 {
 	struct insn *ip = NULL;
 	unsigned int i;
@@ -187,7 +187,7 @@
 	(*buf)++;
 }
 
-static inline void __uasminit
+static inline void
 __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e43..b9d14b6 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,35 +63,35 @@
 	enum fields fields;
 };
 
-static inline __uasminit u32 build_rs(u32 arg)
+static inline u32 build_rs(u32 arg)
 {
 	WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RS_MASK) << RS_SH;
 }
 
-static inline __uasminit u32 build_rt(u32 arg)
+static inline u32 build_rt(u32 arg)
 {
 	WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RT_MASK) << RT_SH;
 }
 
-static inline __uasminit u32 build_rd(u32 arg)
+static inline u32 build_rd(u32 arg)
 {
 	WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RD_MASK) << RD_SH;
 }
 
-static inline __uasminit u32 build_re(u32 arg)
+static inline u32 build_re(u32 arg)
 {
 	WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return (arg & RE_MASK) << RE_SH;
 }
 
-static inline __uasminit u32 build_simm(s32 arg)
+static inline u32 build_simm(s32 arg)
 {
 	WARN(arg > 0x7fff || arg < -0x8000,
 	     KERN_WARNING "Micro-assembler field overflow\n");
@@ -99,14 +99,14 @@
 	return arg & 0xffff;
 }
 
-static inline __uasminit u32 build_uimm(u32 arg)
+static inline u32 build_uimm(u32 arg)
 {
 	WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & IMM_MASK;
 }
 
-static inline __uasminit u32 build_scimm(u32 arg)
+static inline u32 build_scimm(u32 arg)
 {
 	WARN(arg & ~SCIMM_MASK,
 	     KERN_WARNING "Micro-assembler field overflow\n");
@@ -114,21 +114,21 @@
 	return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
-static inline __uasminit u32 build_func(u32 arg)
+static inline u32 build_func(u32 arg)
 {
 	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & FUNC_MASK;
 }
 
-static inline __uasminit u32 build_set(u32 arg)
+static inline u32 build_set(u32 arg)
 {
 	WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
 
 	return arg & SET_MASK;
 }
 
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
+static void build_insn(u32 **buf, enum opcode opc, ...);
 
 #define I_u1u2u3(op)					\
 Ip_u1u2u3(op)						\
@@ -286,7 +286,7 @@
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
-void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
+void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
 			    unsigned int c)
 {
 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -304,7 +304,7 @@
 #endif
 
 /* Handle labels. */
-void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
+void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
 {
 	(*lab)->addr = addr;
 	(*lab)->lab = lid;
@@ -312,7 +312,7 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
 
-int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
+int ISAFUNC(uasm_in_compat_space_p)(long addr)
 {
 	/* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -323,7 +323,7 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
 
-static int __uasminit uasm_rel_highest(long val)
+static int uasm_rel_highest(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -332,7 +332,7 @@
 #endif
 }
 
-static int __uasminit uasm_rel_higher(long val)
+static int uasm_rel_higher(long val)
 {
 #ifdef CONFIG_64BIT
 	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -341,19 +341,19 @@
 #endif
 }
 
-int __uasminit ISAFUNC(uasm_rel_hi)(long val)
+int ISAFUNC(uasm_rel_hi)(long val)
 {
 	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
 
-int __uasminit ISAFUNC(uasm_rel_lo)(long val)
+int ISAFUNC(uasm_rel_lo)(long val)
 {
 	return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
 
-void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
 {
 	if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
 		ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
@@ -371,7 +371,7 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
 
-void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
 {
 	ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
 	if (ISAFUNC(uasm_rel_lo(addr))) {
@@ -386,8 +386,7 @@
 UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
 
 /* Handle relocations. */
-void __uasminit
-ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
+void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
 {
 	(*rel)->addr = addr;
 	(*rel)->type = R_MIPS_PC16;
@@ -396,11 +395,11 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
 
-static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+static inline void __resolve_relocs(struct uasm_reloc *rel,
+				    struct uasm_label *lab);
 
-void __uasminit
-ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
+void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
+				  struct uasm_label *lab)
 {
 	struct uasm_label *l;
 
@@ -411,8 +410,8 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
 
-void __uasminit
-ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
+			       long off)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++)
 		if (rel->addr >= first && rel->addr < end)
@@ -420,8 +419,8 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
 
-void __uasminit
-ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
+void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
+			       long off)
 {
 	for (; lab->lab != UASM_LABEL_INVALID; lab++)
 		if (lab->addr >= first && lab->addr < end)
@@ -429,9 +428,8 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
 
-void __uasminit
-ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
-		  u32 *end, u32 *target)
+void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
+				u32 *first, u32 *end, u32 *target)
 {
 	long off = (long)(target - first);
 
@@ -442,7 +440,7 @@
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
 
-int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
+int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
 		if (rel->addr == addr
@@ -456,83 +454,79 @@
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
 
 /* Convenience functions for labeled branches. */
-void __uasminit
-ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			   int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bltz)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
 
-void __uasminit
-ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
+void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_b)(p, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
-void __uasminit
-ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			   int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_beqz)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
 
-void __uasminit
-ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			    int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_beqzl)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
 
-void __uasminit
-ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
-	unsigned int reg2, int lid)
+void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+			  unsigned int reg2, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
 
-void __uasminit
-ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			   int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bnez)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
 
-void __uasminit
-ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			    int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bgezl)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
 
-void __uasminit
-ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			   int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bgez)(p, reg, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
 
-void __uasminit
-ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
-	      unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			    unsigned int bit, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
 
-void __uasminit
-ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
-	      unsigned int bit, int lid)
+void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+			    unsigned int bit, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
 	ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);