blob: 60ab4c44d30557b285a90cc28e7b8ae1dd2ca21a [file] [log] [blame]
Ralf Baechle49f2ec92013-05-21 10:53:37 +02001/*
2 * MIPS idle loop and WAIT instruction support.
3 *
4 * Copyright (C) xxxx the Anonymous
5 * Copyright (C) 1994 - 2006 Ralf Baechle
6 * Copyright (C) 2003, 2004 Maciej W. Rozycki
7 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/irqflags.h>
17#include <linux/printk.h>
18#include <linux/sched.h>
19#include <asm/cpu.h>
20#include <asm/cpu-info.h>
Ralf Baechle69f24d12013-09-17 10:25:47 +020021#include <asm/cpu-type.h>
Ralf Baechlebdc92d742013-05-21 16:59:19 +020022#include <asm/idle.h>
Ralf Baechle49f2ec92013-05-21 10:53:37 +020023#include <asm/mipsregs.h>
24
25/*
26 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
27 * the implementation of the "wait" feature differs between CPU families. This
28 * points to the function that implements CPU specific wait.
29 * The wait instruction stops the pipeline and reduces the power consumption of
30 * the CPU very much.
31 */
/* Selected by check_wait(); stays NULL when no usable WAIT implementation. */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
34
/*
 * R3081: request a low-power halt by setting the HALT bit in the CP0
 * config register.  Note the ordering: the halt is requested first and
 * interrupts are enabled afterwards, so an interrupt can wake the core.
 */
static void r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	local_irq_enable();
}
41
/*
 * TX39xx: halt the pipeline via TX39_CONF_HALT unless a reschedule is
 * already pending.  The caller is expected to have interrupts disabled;
 * they are unconditionally re-enabled on the way out in either case.
 */
static void r39xx_wait(void)
{
	if (!need_resched())
		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
	local_irq_enable();
}
48
Ralf Baechle087d9902013-05-21 17:33:32 +020049void r4k_wait(void)
50{
51 local_irq_enable();
52 __r4k_wait();
53}
54
Ralf Baechle49f2ec92013-05-21 10:53:37 +020055/*
56 * This variant is preferable as it allows testing need_resched and going to
57 * sleep depending on the outcome atomically. Unfortunately the "It is
58 * implementation-dependent whether the pipeline restarts when a non-enabled
59 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
60 * using this version a gamble.
61 */
void r4k_wait_irqoff(void)
{
	/*
	 * need_resched() is tested with interrupts still disabled, so the
	 * check and the WAIT are atomic with respect to wakeups; WAIT is
	 * skipped entirely when a reschedule is already pending.
	 */
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		/* Force an ISA level that is guaranteed to have WAIT. */
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	local_irq_enable();
}
72
73/*
74 * The RM7000 variant has to handle erratum 38. The workaround is to not
75 * have any pending stores when the WAIT instruction is executed.
76 */
/*
 * RM7000 erratum 38 workaround: drain all pending stores before WAIT.
 * The sync plus the mtc0 to c0_status (reg $12) — which stalls the
 * pipeline until its W stage — guarantee the store buffers are empty
 * when WAIT executes.  The status register value is written back
 * unchanged, so this is purely a pipeline-serialisation trick.
 */
static void rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	local_irq_enable();
}
92
93/*
Manuel Lausse63a24d2013-06-08 19:15:41 +000094 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
95 * since coreclock (and the cp0 counter) stops upon executing it. Only an
96 * interrupt can wake it, so they must be enabled before entering idle modes.
Ralf Baechle49f2ec92013-05-21 10:53:37 +020097 */
static void au1k_wait(void)
{
	/* Interrupts must be enabled for wakeup (see header comment above),
	 * so set the IE bit (bit 0) in the value we will write to c0_status. */
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	arch=r4000			\n"
	/*
	 * Preload the two cache lines holding this very routine (%0 ==
	 * au1k_wait) into the I-cache — NOTE(review): op 0x14 appears to be
	 * an I-cache fill so the wait sequence executes from cache while
	 * the core clock is stopped; confirm against the Au1xxx data book.
	 */
	"	cache	0x14, 0(%0)			\n"
	"	cache	0x14, 32(%0)			\n"
	"	sync					\n"
	"	mtc0	%1, $12				\n" /* wr c0status */
	"	wait					\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	.set	mips0				\n"
	: : "r" (au1k_wait), "r" (c0status));
}
116
/* Set to 1 by the "nowait" kernel parameter; checked once in check_wait(). */
static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;	/* option consumed — presumably the __setup() "handled" convention */
}

__setup("nowait", wait_disable);
127
/*
 * Probe which WAIT implementation (if any) is usable on the current CPU
 * and point cpu_wait at it.  Honours the "nowait" kernel parameter, in
 * which case cpu_wait is left NULL and arch_cpu_idle() falls back to a
 * plain local_irq_enable().
 */
void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_TX3927:
		cpu_wait = r39xx_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_JZRISC:
	case CPU_LOONGSON1:
	case CPU_XLR:
	case CPU_XLP:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON3:
		/* Loongson 3A R2 and later only; earlier revisions get no WAIT. */
		if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data is
		 * likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		/* fall through */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		/* Config7.WII set means WAIT wakes on masked interrupts too. */
		cpu_wait = r4k_wait;
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 has E16.
		 * Rev3.1 WAIT is nop, why bother
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode. So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction. Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}
255
Ralf Baechle00baf852013-05-21 12:47:26 +0200256void arch_cpu_idle(void)
257{
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200258 if (cpu_wait)
Ralf Baechlec9b68692013-05-21 13:02:12 +0200259 cpu_wait();
Ralf Baechle49f2ec92013-05-21 10:53:37 +0200260 else
261 local_irq_enable();
262}
Paul Burtonda9f9702014-04-14 16:16:41 +0100263
264#ifdef CONFIG_CPU_IDLE
265
/*
 * cpuidle "wait" state entry point: run the regular architecture idle
 * routine, then report the entered state index back to the cpuidle core.
 * The dev/drv arguments are required by the cpuidle callback signature
 * but are unused here.
 */
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}
272
273#endif