/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34
35#ifndef _ASM_NLM_MIPS_EXTS_H
36#define _ASM_NLM_MIPS_EXTS_H
37
/*
 * XLR and XLP interrupt request and interrupt mask registers
 */
/*
 * NOTE: Do not save/restore flags around write_c0_eimr().
 * On non-R2 platforms the flags has part of EIMR that is shadowed in STATUS
 * register. Restoring flags will overwrite the lower 8 bits of EIMR.
 *
 * Call with interrupts disabled.
 *
 * Writes the full 64-bit EIMR (CP0 register $9, select 7).  On a 32-bit
 * kernel the 64-bit value is passed as a register pair (%L0 = low word,
 * %M0 = high word): the asm zero-extends the low word, shifts the high
 * word into the upper 32 bits, ORs the two into a single 64-bit GPR and
 * issues one dmtc0.  On a 64-bit kernel the generic 64-bit c0 accessor
 * is used directly.
 */
#define write_c0_eimr(val)						\
do {									\
	if (sizeof(unsigned long) == 4) {				\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc0\t%L0, $9, 7\n\t"				\
			".set\tmips0"					\
			: : "r" (val));					\
	} else								\
		__write_64bit_c0_register($9, 7, (val));		\
} while (0)
63
/*
 * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with
 * standard functions will be very inefficient. This provides
 * optimized functions for the normal operations on the registers.
 *
 * Call with interrupts disabled.
 */

/*
 * Acknowledge interrupt @irq by writing the single bit (1ULL << irq)
 * to the EIRR register (CP0 $9, select 6).  The assembler temporary
 * register $1 is used as scratch, hence ".set noat".
 */
static inline void ack_c0_eirr(int irq)
{
	__asm__ __volatile__(
		".set push\n\t"
		".set mips64\n\t"
		".set noat\n\t"
		"li $1, 1\n\t"			/* $1 = 1 */
		"dsllv $1, $1, %0\n\t"		/* $1 = 1ULL << irq */
		"dmtc0 $1, $9, 6\n\t"		/* write the bit to EIRR */
		".set pop"
		: : "r" (irq));
}
83
/*
 * Unmask interrupt @irq: read-modify-write EIMR (CP0 $9, select 7)
 * to set bit @irq.  The "+r" constraint is required because the asm
 * overwrites the register holding @irq with the shifted bit mask.
 */
static inline void set_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set push\n\t"
		".set mips64\n\t"
		".set noat\n\t"
		"li $1, 1\n\t"			/* $1 = 1 */
		"dsllv %0, $1, %0\n\t"		/* %0 = 1ULL << irq */
		"dmfc0 $1, $9, 7\n\t"		/* $1 = current EIMR */
		"or $1, %0\n\t"			/* set the bit */
		"dmtc0 $1, $9, 7\n\t"		/* write back EIMR */
		".set pop"
		: "+r" (irq));
}
98
/*
 * Mask interrupt @irq: read-modify-write EIMR (CP0 $9, select 7) to
 * clear bit @irq.  The OR followed by XOR with the same mask clears
 * the bit regardless of its previous state.  As in set_c0_eimr(),
 * "+r" marks the irq register as clobbered by the shift.
 */
static inline void clear_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set push\n\t"
		".set mips64\n\t"
		".set noat\n\t"
		"li $1, 1\n\t"			/* $1 = 1 */
		"dsllv %0, $1, %0\n\t"		/* %0 = 1ULL << irq */
		"dmfc0 $1, $9, 7\n\t"		/* $1 = current EIMR */
		"or $1, %0\n\t"			/* force the bit on ... */
		"xor $1, %0\n\t"		/* ... then toggle it off */
		"dmtc0 $1, $9, 7\n\t"		/* write back EIMR */
		".set pop"
		: "+r" (irq));
}
114
/*
 * Read c0 eimr and c0 eirr, do AND of the two values, the result is
 * the interrupts which are raised and are not masked.
 */
static inline uint64_t read_c0_eirr_and_eimr(void)
{
	uint64_t val;

#ifdef CONFIG_64BIT
	val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
#else
	/*
	 * 32-bit kernel: read both 64-bit registers into full-width
	 * GPRs, AND them, then split the 64-bit result into the
	 * register pair (%M0 = high word, %L0 = low word) backing 'val'.
	 */
	__asm__ __volatile__(
		".set push\n\t"
		".set mips64\n\t"
		".set noat\n\t"
		"dmfc0 %M0, $9, 6\n\t"		/* %M0 = EIRR */
		"dmfc0 %L0, $9, 7\n\t"		/* %L0 = EIMR */
		"and %M0, %L0\n\t"		/* %M0 = EIRR & EIMR */
		"dsll %L0, %M0, 32\n\t"		/* move low 32 bits up */
		"dsra %M0, %M0, 32\n\t"		/* %M0 = high word */
		"dsra %L0, %L0, 32\n\t"		/* %L0 = low word */
		".set pop"
		: "=r" (val));
#endif
	return val;
}
141
Jayachandran C5c64250672011-05-07 01:36:40 +0530142static inline int hard_smp_processor_id(void)
143{
144 return __read_32bit_c0_register($15, 1) & 0x3ff;
145}
146
Jayachandran C77ae7982012-10-31 12:01:39 +0000147static inline int nlm_nodeid(void)
148{
Jayachandran C5874743e2014-04-29 20:07:49 +0530149 uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
Jayachandran C98d48842013-12-21 16:52:26 +0530150
Yonghong Song1c983982014-04-29 20:07:53 +0530151 if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
152 (prid == PRID_IMP_NETLOGIC_XLP5XX))
Jayachandran C98d48842013-12-21 16:52:26 +0530153 return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
154 else
155 return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
Jayachandran C77ae7982012-10-31 12:01:39 +0000156}
157
/* Core number of the executing CPU: EBASE bits [4:2]. */
static inline unsigned int nlm_core_id(void)
{
	unsigned int ebase = read_c0_ebase();

	return (ebase >> 2) & 0x7;
}
162
/* Hardware thread number within the core: EBASE bits [1:0]. */
static inline unsigned int nlm_thread_id(void)
{
	unsigned int ebase = read_c0_ebase();

	return ebase & 0x3;
}
167
/*
 * Read a 64-bit CP2 register as an unsigned long long on a 32-bit
 * kernel.  dmfc2 pulls the full 64 bits into one GPR (%M0); the
 * dsll/dsra sequence then splits it into the high (%M0) and low (%L0)
 * words of the result's register pair.  Interrupts are disabled around
 * the sequence -- presumably so an interrupt cannot clobber the upper
 * halves of the GPRs while running in 32-bit mode (same pattern as the
 * generic MIPS __read_64bit_c0_split; TODO confirm).  'sel' must be a
 * compile-time constant; the sel == 0 variant omits the selector from
 * the instruction.
 */
#define __read_64bit_c2_split(source, sel)				\
({									\
	unsigned long long __val;					\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source "\n\t"			\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source ", " #sel "\n\t"		\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	local_irq_restore(__flags);					\
									\
	__val;								\
})
196
/*
 * Write a 64-bit value (passed as a 32-bit register pair) to a CP2
 * register on a 32-bit kernel.  The low word (%L0) is zero-extended,
 * the high word (%M0) shifted into the upper 32 bits, and the two ORed
 * into a single 64-bit GPR for dmtc2.  Interrupts are disabled for the
 * same reason as in __read_64bit_c2_split.
 */
#define __write_64bit_c2_split(source, sel, val)			\
do {									\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	local_irq_restore(__flags);					\
} while (0)
224
/*
 * Read a 32-bit CP2 register with mfc2.  The sel == 0 branch emits the
 * two-operand instruction form; otherwise the selector is appended.
 */
#define __read_32bit_c2_register(source, sel)				\
({	uint32_t __res;							\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})
241
/*
 * Read a 64-bit CP2 register.  32-bit kernels fall back to
 * __read_64bit_c2_split; 64-bit kernels need only a single dmfc2.
 * The sizeof() test is a compile-time constant, so the dead branch
 * is eliminated.
 */
#define __read_64bit_c2_register(source, sel)				\
({	unsigned long long __res;					\
	if (sizeof(unsigned long) == 4)					\
		__res = __read_64bit_c2_split(source, sel);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})
260
/*
 * Write a 64-bit CP2 register.  32-bit kernels go through
 * __write_64bit_c2_split; 64-bit kernels use a single dmtc2.  The "Jr"
 * constraint together with %z0 lets a constant zero be emitted as the
 * $0 register instead of loading an immediate.
 */
#define __write_64bit_c2_register(register, sel, value)			\
do {									\
	if (sizeof(unsigned long) == 4)					\
		__write_64bit_c2_split(register, sel, value);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #register "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #register ", " #sel "\n\t"	\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
} while (0)
278
/*
 * Write a 32-bit CP2 register with mtc2.  The "Jr" constraint together
 * with %z0 lets a constant zero be emitted as the $0 register.
 *
 * Wrapped in do/while(0) instead of a GNU statement expression for
 * consistency with __write_64bit_c2_register: the macro produces no
 * value, so a statement expression bought nothing, and do/while(0)
 * makes it expand safely as a single statement (e.g. in unbraced if).
 */
#define __write_32bit_c2_register(reg, sel, value)			\
do {									\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
} while (0)
294
Jayachandran C5c64250672011-05-07 01:36:40 +0530295#endif /*_ASM_NLM_MIPS_EXTS_H */