/* MN10300 IRQ flag handling
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#include <asm/cpu-regs.h>
/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
#include <asm/smp.h>

/*
 * interrupt control
 * - "disabled": run in IM1/2
 *   - level 0 - kernel debugger
 *   - level 1 - virtual serial DMA (if present)
 *   - level 5 - normal interrupt priority
 *   - level 6 - timer interrupt
 * - "enabled": run in IM7
 */
#define MN10300_CLI_LEVEL	(CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)

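/*
 * As a worked example (assuming the kernel is configured with
 * CONFIG_LINUX_CLI_LEVEL=2), MN10300_CLI_LEVEL expands to
 * (2 << EPSW_IM_SHIFT), i.e. an IM field of 2: with interrupts "disabled"
 * the CPU still takes level 0 (kernel debugger) and level 1 (virtual serial
 * DMA) interrupts, while normal (level 5) and timer (level 6) interrupts
 * remain masked.
 */
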
#ifndef __ASSEMBLY__

static inline unsigned long arch_local_save_flags(void)
{
        unsigned long flags;

        asm volatile("mov epsw,%0" : "=d"(flags));
        return flags;
}

static inline void arch_local_irq_disable(void)
{
        asm volatile(
                "       and %0,epsw     \n"
                "       or %1,epsw      \n"
                "       nop             \n"
                "       nop             \n"
                "       nop             \n"
                :
                : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)
                : "memory");
}

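/*
 * Note: the nop padding after the EPSW update here (and in several of the
 * helpers below) appears to allow for the latency of an EPSW change taking
 * effect; the comment on arch_safe_halt() below mentions that re-enabling
 * interrupts takes three instruction cycles to complete.
 */
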
static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags;

        flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}

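/*
 * Typical usage of the save/restore pair (a sketch of the usual irqflags
 * pattern, not code from this file):
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();
 *      ... critical section with interrupts masked to MN10300_CLI_LEVEL ...
 *      arch_local_irq_restore(flags);
 */
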
/*
 * we make sure arch_local_irq_enable() doesn't cause priority inversion
 */
extern unsigned long __mn10300_irq_enabled_epsw[];

static inline void arch_local_irq_enable(void)
{
        unsigned long tmp;
        int cpu = raw_smp_processor_id();

        asm volatile(
                "       mov epsw,%0     \n"
                "       and %1,%0       \n"
                "       or %2,%0        \n"
                "       mov %0,epsw     \n"
                : "=&d"(tmp)
                : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
                : "memory", "cc");
}

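/*
 * Note: rather than unconditionally raising the mask to IM7, enabling ORs in
 * this CPU's entry from __mn10300_irq_enabled_epsw[], so the IM level used
 * when "enabled" is whatever that per-CPU value holds; this appears to be
 * how the priority-inversion concern noted above is addressed.
 */
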
static inline void arch_local_irq_restore(unsigned long flags)
{
        asm volatile(
                "       mov %0,epsw     \n"
                "       nop             \n"
                "       nop             \n"
                "       nop             \n"
                :
                : "d"(flags)
                : "memory", "cc");
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
        return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}

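/*
 * In other words, a flags value only counts as "interrupts enabled" when
 * EPSW_IE is set and the IM field is at level 7; any lower mask level is
 * reported as disabled, matching the "enabled means run in IM7" policy
 * described at the top of this file.
 */
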
static inline bool arch_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}

/*
 * Hook to save power by halting the CPU
 * - called from the idle loop
 * - must reenable interrupts (which takes three instruction cycles to complete)
 */
static inline void arch_safe_halt(void)
{
#ifdef CONFIG_SMP
        arch_local_irq_enable();
#else
        asm volatile(
                "       or %0,epsw      \n"
                "       nop             \n"
                "       nop             \n"
                "       bset %2,(%1)    \n"
                :
                : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
                : "cc");
#endif
}

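/*
 * Note: on SMP, arch_safe_halt() above only enables interrupts and does not
 * touch the CPUM sleep bit; on UP it enables interrupts and then sets
 * CPUM_SLEEP, presumably halting the CPU until the next interrupt (the exact
 * wake-up behaviour is hardware-defined and only sketched here).
 */
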
#define __sleep_cpu()                                           \
do {                                                            \
        asm volatile(                                           \
                "       bset %1,(%0)\n"                         \
                "1:     btst %1,(%0)\n"                         \
                "       bne 1b\n"                               \
                :                                               \
                : "i"(&CPUM), "i"(CPUM_SLEEP)                   \
                : "cc"                                          \
        );                                                      \
} while (0)

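/*
 * Note: __sleep_cpu() sets CPUM_SLEEP and then spins in the btst/bne loop
 * while the bit still reads back as set, i.e. it waits for the sleep state
 * to be dropped before continuing (an interpretation of the loop above, not
 * a documented guarantee).
 */
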
static inline void arch_local_cli(void)
{
        asm volatile(
                "       and %0,epsw     \n"
                "       nop             \n"
                "       nop             \n"
                "       nop             \n"
                :
                : "i"(~EPSW_IE)
                : "memory"
                );
}

static inline unsigned long arch_local_cli_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_cli();
        return flags;
}

static inline void arch_local_sti(void)
{
        asm volatile(
                "       or %0,epsw      \n"
                :
                : "i"(EPSW_IE)
                : "memory");
}

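/*
 * Note: the cli/sti helpers above only toggle EPSW_IE and leave the IM field
 * alone, unlike arch_local_irq_disable()/arch_local_irq_enable(), which also
 * rewrite the interrupt mask level.
 */
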
static inline void arch_local_change_intr_mask_level(unsigned long level)
{
        asm volatile(
                "       and %0,epsw     \n"
                "       or %1,epsw      \n"
                :
                : "i"(~EPSW_IM), "i"(EPSW_IE | level)
                : "cc", "memory");
}

#else /* !__ASSEMBLY__ */

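/*
 * Assembly-language counterparts of the C inlines above, for use where this
 * header is included from .S files.
 */
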
#define LOCAL_SAVE_FLAGS(reg)                   \
        mov     epsw,reg

#define LOCAL_IRQ_DISABLE                       \
        and     ~EPSW_IM,epsw;                  \
        or      EPSW_IE|MN10300_CLI_LEVEL,epsw; \
        nop;                                    \
        nop;                                    \
        nop

#define LOCAL_IRQ_ENABLE                        \
        or      EPSW_IE|EPSW_IM_7,epsw

#define LOCAL_IRQ_RESTORE(reg)                  \
        mov     reg,epsw

#define LOCAL_CLI_SAVE(reg)                     \
        mov     epsw,reg;                       \
        and     ~EPSW_IE,epsw;                  \
        nop;                                    \
        nop;                                    \
        nop

#define LOCAL_CLI                               \
        and     ~EPSW_IE,epsw;                  \
        nop;                                    \
        nop;                                    \
        nop

#define LOCAL_STI                               \
        or      EPSW_IE,epsw

#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)     \
        and     ~EPSW_IM,epsw;                  \
        or      EPSW_IE|(level),epsw

#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */