/* MN10300 CPU cache invalidation routines, using automatic purge registers
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"

#define mn10300_local_dcache_inv_range_intr_interval \
	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)

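/* the interval expands to a mask of (2^LOG2_INTERVAL - 1) bytes, so the check
 * below rejects anything that will not fit in eight bits
 */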
#if mn10300_local_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif

	.am33_2

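# On a uniprocessor kernel there is only this CPU's cache to deal with, so the
# generic entry points are simply aliases for the local routines below.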
#ifndef CONFIG_SMP
	.globl mn10300_icache_inv
	.globl mn10300_icache_inv_page
	.globl mn10300_icache_inv_range
	.globl mn10300_icache_inv_range2
	.globl mn10300_dcache_inv
	.globl mn10300_dcache_inv_page
	.globl mn10300_dcache_inv_range
	.globl mn10300_dcache_inv_range2

mn10300_icache_inv = mn10300_local_icache_inv
mn10300_icache_inv_page = mn10300_local_icache_inv_page
mn10300_icache_inv_range = mn10300_local_icache_inv_range
mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
mn10300_dcache_inv = mn10300_local_dcache_inv
mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2

#endif /* !CONFIG_SMP */

###############################################################################
#
# void mn10300_local_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
	ALIGN
	.globl mn10300_local_icache_inv
	.type mn10300_local_icache_inv,@function
mn10300_local_icache_inv:
	mov CHCTR,a0

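	# nothing to do if the instruction cache is disabled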
	movhu (a0),d0
	btst CHCTR_ICEN,d0
	beq mn10300_local_icache_inv_end

	invalidate_icache 1

mn10300_local_icache_inv_end:
	ret [],0
	.size mn10300_local_icache_inv,.-mn10300_local_icache_inv

###############################################################################
#
# void mn10300_local_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
	ALIGN
	.globl mn10300_local_dcache_inv
	.type mn10300_local_dcache_inv,@function
mn10300_local_dcache_inv:
	mov CHCTR,a0

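	# nothing to do if the data cache is disabled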
	movhu (a0),d0
	btst CHCTR_DCEN,d0
	beq mn10300_local_dcache_inv_end

	invalidate_dcache 1

mn10300_local_dcache_inv_end:
	ret [],0
	.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv

###############################################################################
#
# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_dcache_inv_page(unsigned long start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
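	# Roughly, in C (an informal sketch of the purge loop below, which runs
	# with local interrupts disabled; in writeback mode, ranges that are
	# not cacheline-aligned are handed off to the flush+invalidate variant
	# instead):
	#
	#	addr = start;
	#	do {
	#		blocksize = largest power-of-two sized block that is
	#			    naturally aligned at addr and no bigger
	#			    than the bytes remaining;
	#		DCPGMR = ~(blocksize - 1);	/* area mask */
	#		DCPGCR = addr | DCPGCR_DCI;	/* kick off the purge */
	#		while (DCPGCR & DCPGCR_DCPGBSY)
	#			;			/* wait for completion */
	#		addr += blocksize;
	#	} while (addr < end);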
	ALIGN
	.globl mn10300_local_dcache_inv_page
	.globl mn10300_local_dcache_inv_range
	.globl mn10300_local_dcache_inv_range2
	.type mn10300_local_dcache_inv_page,@function
	.type mn10300_local_dcache_inv_range,@function
	.type mn10300_local_dcache_inv_range2,@function
mn10300_local_dcache_inv_page:
	and ~(PAGE_SIZE-1),d0
	mov PAGE_SIZE,d1
mn10300_local_dcache_inv_range2:
	add d0,d1
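	# (both of the above fall through, leaving d0 = start address and
	#  d1 = end address)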
mn10300_local_dcache_inv_range:
	# If we are in writeback mode we check the start and end alignments,
	# and if they're not cacheline-aligned, we must flush any bits outside
	# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
	btst ~L1_CACHE_TAG_MASK,d0
	bne 1f
	btst ~L1_CACHE_TAG_MASK,d1
	beq 2f
1:
	bra mn10300_local_dcache_flush_inv_range
2:
#endif /* CONFIG_MN10300_CACHE_WBACK */

	movm [d2,d3,a2],(sp)

	mov CHCTR,a0
	movhu (a0),d2
	btst CHCTR_DCEN,d2
	beq mn10300_local_dcache_inv_range_end

	# round the addresses out to be full cachelines, unless we're in
	# writeback mode, in which case we would be in flush and invalidate by
	# now
#ifndef CONFIG_MN10300_CACHE_WBACK
	and L1_CACHE_TAG_MASK,d0	# round start addr down

	mov L1_CACHE_BYTES-1,d2
	add d2,d1
	and L1_CACHE_TAG_MASK,d1	# round end addr up
#endif /* !CONFIG_MN10300_CACHE_WBACK */

	sub d0,d1,d2			# calculate the total size
	mov d0,a2			# A2 = start address
	mov d1,a1			# A1 = end address

	LOCAL_CLI_SAVE(d3)

	mov DCPGCR,a0			# make sure the purger isn't busy
	setlb
	mov (a0),d0
	btst DCPGCR_DCPGBSY,d0
	lne

	# skip initial address alignment calculation if address is zero
	mov d2,d1
	cmp 0,a2
	beq 1f

dcivloop:
	/* calculate alignsize
	 *
	 * alignsize = L1_CACHE_BYTES;
	 * while (!(start & alignsize)) {
	 *	alignsize <<= 1;
	 * }
	 * d1 = alignsize;
	 */
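	# e.g. (illustrative value) a start address of 0x1080 gives an
	# alignsize of 0x80: the largest power-of-two block size at which the
	# start address is naturally aligned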
	mov L1_CACHE_BYTES,d1
	lsr 1,d1
	setlb
	add d1,d1
	mov d1,d0
	and a2,d0
	leq

1:
	/* calculate invsize
	 *
	 * if (totalsize > alignsize) {
	 *	invsize = alignsize;
	 * } else {
	 *	invsize = totalsize;
	 *	tmp = 0x80000000;
	 *	while (!(invsize & tmp)) {
	 *		tmp >>= 1;
	 *	}
	 *	invsize = tmp;
	 * }
	 * d1 = invsize
	 */
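	# e.g. (illustrative values) with alignsize 0x80 and 0x30 bytes still
	# to do, invsize becomes 0x20: the largest power of two not exceeding
	# the remaining size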
	cmp d2,d1
	bns 2f
	mov d2,d1

	mov 0x80000000,d0		# start with bit 31 set
	setlb
	lsr 1,d0
	mov d0,e0
	and d1,e0
	leq
	mov d0,d1

2:
	/* set mask
	 *
	 * mask = ~(invsize-1);
	 * DCPGMR = mask;
	 */
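	# e.g. an invsize of 0x80 gives a mask of 0xffffff80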
	mov d1,d0
	add -1,d0
	not d0
	mov d0,(DCPGMR)

	# invalidate area
	mov a2,d0
	or DCPGCR_DCI,d0
	mov d0,(a0)			# DCPGCR = (mask & start) | DCPGCR_DCI

	setlb				# wait for the purge to complete
	mov (a0),d0
	btst DCPGCR_DCPGBSY,d0
	lne

	sub d1,d2			# decrease size remaining
	add d1,a2			# increase next start address

	/* check whether the end address has been reached
	 *
	 * a2 = a2 + invsize
	 * if (a2 < end) {
	 *	goto dcivloop;
	 * } */
	cmp a1,a2
	bns dcivloop

	LOCAL_IRQ_RESTORE(d3)

mn10300_local_dcache_inv_range_end:
	ret [d2,d3,a2],12
	.size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
	.size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
	.size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2

###############################################################################
#
# void mn10300_local_icache_inv_page(unsigned long start)
# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
# Invalidate a range of addresses on a page in the icache
#
###############################################################################
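	# Roughly, in C (an informal sketch of the loop below, which runs with
	# local interrupts disabled):
	#
	#	blocksize = (end - start) rounded up to a power of two, but at
	#		    least one cacheline;
	#	ICIVMR = ~(blocksize - 1);		/* area mask */
	#	addr = start & ~(blocksize - 1);
	#	do {
	#		ICIVCR = addr | ICIVCR_ICI;	/* kick off invalidation */
	#		while (ICIVCR & ICIVCR_ICIVBSY)
	#			;			/* wait for completion */
	#		addr += blocksize;
	#	} while (addr < end);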
	ALIGN
	.globl mn10300_local_icache_inv_page
	.globl mn10300_local_icache_inv_range
	.globl mn10300_local_icache_inv_range2
	.type mn10300_local_icache_inv_page,@function
	.type mn10300_local_icache_inv_range,@function
	.type mn10300_local_icache_inv_range2,@function
mn10300_local_icache_inv_page:
	and ~(PAGE_SIZE-1),d0
	mov PAGE_SIZE,d1
mn10300_local_icache_inv_range2:
	add d0,d1
mn10300_local_icache_inv_range:
	movm [d2,d3,a2],(sp)

	mov CHCTR,a0
	movhu (a0),d2
	btst CHCTR_ICEN,d2
	beq mn10300_local_icache_inv_range_reg_end

	/* calculate alignsize
	 *
	 * alignsize = L1_CACHE_BYTES;
	 * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
	 *	alignsize <<= 1;
	 * }
	 * d2 = alignsize;
	 */
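	# e.g. (illustrative value) a 0x90-byte range gives an alignsize of
	# 0x100: the range size rounded up to the next power of two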
	mov L1_CACHE_BYTES,d2
	sub d0,d1,d3
	add -1,d3
	lsr L1_CACHE_SHIFT,d3
	beq 2f
1:
	add d2,d2
	lsr 1,d3
	bne 1b
2:

	/* a1 = end */
	mov d1,a1

	LOCAL_CLI_SAVE(d3)

	mov ICIVCR,a0
	/* wait for any previous area invalidation to finish */
	setlb
	mov (a0),d1
	btst ICIVCR_ICIVBSY,d1
	lne

	/* set mask
	 *
	 * mask = ~(alignsize-1);
	 * ICIVMR = mask;
	 */
	mov d2,d1
	add -1,d1
	not d1
	mov d1,(ICIVMR)
	/* a2 = mask & start */
	and d1,d0,a2
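	# (i.e. the start address rounded down to an alignsize boundary)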

icivloop:
	/* area invalidate
	 *
	 * ICIVCR = (mask & start) | ICIVCR_ICI
	 */
	mov a2,d0
	or ICIVCR_ICI,d0
	mov d0,(a0)

	/* wait for the area invalidation to complete */
	setlb
	mov (a0),d1
	btst ICIVCR_ICIVBSY,d1
	lne

	/* check whether the end address has been reached
	 *
	 * a2 = a2 + alignsize
	 * if (a2 < end) {
	 *	goto icivloop;
	 * } */
	add d2,a2
	cmp a1,a2
	bns icivloop

	LOCAL_IRQ_RESTORE(d3)

mn10300_local_icache_inv_range_reg_end:
	ret [d2,d3,a2],12
	.size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
	.size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
	.size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2