/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/stringify.h>

#ifndef CONFIG_CPU_MIPSR2

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating on the IE bit alone is not enough.
 *
 * If an mfc0 $12 follows a store, the mfc0 is the last instruction of a
 * page, and fetching the next instruction causes a TLB miss, the result
 * of the mfc0 might wrongly have the EXL bit set.
 *
 * Errata: ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before the mfc0.
 */
notrace void arch_local_irq_disable(void)
{
	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1f						\n"
	"	.set	noreorder					\n"
	"	mtc0	$1,$12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
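
/*
 * Note on the non-MIPSR2 sequence above: "ori $1, 0x1f; xori $1, 0x1f"
 * clears the low five bits of c0_status (IE, EXL, ERL and the KSU field)
 * while leaving the remaining bits untouched, e.g.
 *
 *	0x1000ff01 -> (ori 0x1f) -> 0x1000ff1f -> (xori 0x1f) -> 0x1000ff00
 *
 * Clearing IE is what actually disables interrupts; clearing a possibly
 * spurious EXL in the same pass is what implements the TX49 workaround
 * described in the comment above.
 */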


notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	%[flags], $12					\n"
	"	ori	$1, %[flags], 0x1f				\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	preempt_enable();

	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
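
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * pair the two helpers around a short critical section on the local CPU:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... touch per-CPU state with interrupts masked ...
 *	arch_local_irq_restore(flags);
 *
 * Kernel code normally reaches these through the local_irq_save() /
 * local_irq_restore() wrappers from <linux/irqflags.h> rather than by
 * calling the arch_ functions directly.
 */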

notrace void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12						\n"
	"	andi	%[flags], 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
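
/*
 * The non-MIPSR2 restore sequence above, rewritten as pseudo-C purely for
 * illustration (ST0_IE and the read_c0_status()/write_c0_status() accessors
 * are the <asm/mipsregs.h> equivalents of the raw mfc0/mtc0):
 *
 *	status = read_c0_status();		   mfc0 $1, $12
 *	flags &= ST0_IE;			   andi %[flags], 1
 *	status = (status | 0x1f) ^ 0x1f;	   ori/xori: clear IE, EXL, ERL, KSU
 *	status |= flags;			   or   %[flags], $1
 *	write_c0_status(status);		   mtc0 %[flags], $12
 *
 * i.e. the saved IE bit is merged back into an otherwise unchanged Status
 * register, so interrupts are re-enabled only if they were enabled when
 * flags was taken.
 */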


notrace void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12						\n"
	"	andi	%[flags], 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);

#endif /* !CONFIG_CPU_MIPSR2 */