/*
 * arch/ppc/boot/common/util.S
 *
 * Useful bootup functions, which are more easily done in asm than C.
 *
 * NOTE: Be very very careful about the registers you use here.
 *	 We don't follow any ABI calling convention among the
 *	 assembler functions that call each other, especially early
 *	 in the initialization.  Please preserve at least r3 and r4
 *	 for these early functions, as they often contain information
 *	 passed from boot roms into the C decompress function.
 *
 * Author: Tom Rini
 *	   trini@mvista.com
 * Derived from arch/ppc/boot/prep/head.S (Cort Dougan, many others).
 *
 * 2001-2004 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>


	.text

#ifdef CONFIG_6xx
	.globl	disable_6xx_mmu
disable_6xx_mmu:
	/* Establish default MSR value, exception prefix 0xFFF.
	 * If necessary, this function must fix up the LR if we
	 * return to a different address space once the MMU is
	 * disabled.
	 */
	li	r8,MSR_IP|MSR_FP
	mtmsr	r8
	isync

	/* Test for a 601 */
	mfpvr	r10
	srwi	r10,r10,16
	cmpwi	0,r10,1			/* 601 ? */
	beq	.clearbats_601

	/* Clear BATs */
	li	r8,0
	mtspr	SPRN_DBAT0U,r8
	mtspr	SPRN_DBAT0L,r8
	mtspr	SPRN_DBAT1U,r8
	mtspr	SPRN_DBAT1L,r8
	mtspr	SPRN_DBAT2U,r8
	mtspr	SPRN_DBAT2L,r8
	mtspr	SPRN_DBAT3U,r8
	mtspr	SPRN_DBAT3L,r8
.clearbats_601:
	mtspr	SPRN_IBAT0U,r8
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT1U,r8
	mtspr	SPRN_IBAT1L,r8
	mtspr	SPRN_IBAT2U,r8
	mtspr	SPRN_IBAT2L,r8
	mtspr	SPRN_IBAT3U,r8
	mtspr	SPRN_IBAT3L,r8
	isync
	sync
	sync

	/* Set segment registers */
	li	r8,16			/* load up segment register values */
	mtctr	r8			/* for context 0 */
	lis	r8,0x2000		/* Ku = 1, VSID = 0 */
	li	r10,0
3:	mtsrin	r8,r10
	addi	r8,r8,0x111		/* increment VSID */
	addis	r10,r10,0x1000		/* address of next segment */
	bdnz	3b
	blr
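
/* For reference, the segment register loop above does the equivalent of
 * this C sketch; mtsrin() is only illustrative shorthand for the mtsrin
 * instruction, not a real helper:
 *
 *	unsigned long sr = 0x20000000;	// Ku = 1, VSID = 0
 *	unsigned long ea = 0;
 *	int i;
 *
 *	for (i = 0; i < 16; i++) {
 *		mtsrin(sr, ea);		// SR number taken from EA[0:3]
 *		sr += 0x111;		// next VSID
 *		ea += 0x10000000;	// next 256MB segment
 *	}
 */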

	.globl	disable_6xx_l1cache
disable_6xx_l1cache:
	/* Enable, invalidate and then disable the L1 icache/dcache. */
	li	r8,0
	ori	r8,r8,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
	mfspr	r11,SPRN_HID0
	or	r11,r11,r8
	andc	r10,r11,r8
	isync
	mtspr	SPRN_HID0,r8
	sync
	isync
	mtspr	SPRN_HID0,r10
	sync
	isync
	blr
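
/* A C sketch of the HID0 sequence above; mfspr()/mtspr() are illustrative
 * shorthand for the instructions, not real helpers:
 *
 *	unsigned long bits = HID0_ICE | HID0_DCE | HID0_ICFI | HID0_DCI;
 *	unsigned long hid0 = mfspr(SPRN_HID0);
 *
 *	mtspr(SPRN_HID0, bits);			// enable + invalidate both
 *	mtspr(SPRN_HID0, hid0 & ~bits);		// then leave caches off
 */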
#endif

	.globl	_setup_L2CR
_setup_L2CR:
/*
 * We should be skipping this section on CPUs where this results in an
 * illegal instruction.  If not, please send trini@kernel.crashing.org
 * the PVR of your CPU.
 */
	/* Invalidate/disable L2 cache */
	sync
	isync
	mfspr	r8,SPRN_L2CR
	rlwinm	r8,r8,0,1,31		/* clear L2E (bit 0) */
	oris	r8,r8,L2CR_L2I@h
	sync
	isync
	mtspr	SPRN_L2CR,r8
	sync
	isync

	/* Wait for the invalidation to complete */
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmplwi	cr0,r8,0x8000			/* 7450 */
	cmplwi	cr1,r8,0x8001			/* 7455 */
	cmplwi	cr2,r8,0x8002			/* 7457 */
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq	/* Now test if any are true. */
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bne	2f

1:	mfspr	r8,SPRN_L2CR	/* On 745x, poll L2I bit (bit 10) */
	rlwinm.	r9,r8,0,10,10
	bne	1b
	b	3f

2:	mfspr	r8,SPRN_L2CR	/* On 75x & 74[01]0, poll L2IP bit (bit 31) */
	rlwinm.	r9,r8,0,31,31
	bne	2b

3:	rlwinm	r8,r8,0,11,9	/* Turn off L2I bit */
	sync
	isync
	mtspr	SPRN_L2CR,r8
	sync
	isync
	blr
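
/* In C terms the wait loop above is roughly as follows; mfspr() is
 * illustrative shorthand, and the masks spell out the IBM-numbered bits
 * polled above:
 *
 *	if (pvr is 7450/7455/7457)
 *		while (mfspr(SPRN_L2CR) & 0x00200000)	// L2I, bit 10
 *			;
 *	else
 *		while (mfspr(SPRN_L2CR) & 0x00000001)	// L2IP, bit 31
 *			;
 */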

	.globl	_setup_L3CR
_setup_L3CR:
	/* Invalidate/disable L3 cache */
	sync
	isync
	mfspr	r8,SPRN_L3CR
	rlwinm	r8,r8,0,1,31		/* clear L3E (bit 0) */
	ori	r8,r8,L3CR_L3I@l
	sync
	isync
	mtspr	SPRN_L3CR,r8
	sync
	isync

	/* Wait for the invalidation to complete */
1:	mfspr	r8,SPRN_L3CR
	rlwinm.	r9,r8,0,21,21		/* poll L3I (bit 21) */
	bne	1b

	rlwinm	r8,r8,0,22,20		/* Turn off L3I bit */
	sync
	isync
	mtspr	SPRN_L3CR,r8
	sync
	isync
	blr


/* udelay (on non-601 processors) needs to know the period of the
 * timebase in nanoseconds.  This used to be hardcoded to be 60ns
 * (period of 66MHz/4).  Now a variable is used that is initialized to
 * 60 for backward compatibility, but it can be overridden as necessary
 * with code something like this:
 *	extern unsigned long timebase_period_ns;
 *	timebase_period_ns = 1000000000 / bd->bi_tbfreq;
 */
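/* For example, a 66MHz bus clocking the timebase at 66/4 = 16.6MHz gives
 * bd->bi_tbfreq = 16666666, and 1000000000 / 16666666 ~= 60, matching
 * the default below.
 */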
	.data
	.globl	timebase_period_ns
timebase_period_ns:
	.long	60

	.text
/*
 * Delay for a number of microseconds
 */
	.globl	udelay
udelay:
	mfspr	r4,SPRN_PVR
	srwi	r4,r4,16
	cmpwi	0,r4,1			/* 601 ? */
	bne	.udelay_not_601
00:	li	r0,86			/* Instructions / microsecond? */
	mtctr	r0
10:	addi	r0,r0,0			/* NOP */
	bdnz	10b
	subic.	r3,r3,1
	bne	00b
	blr

.udelay_not_601:
	mulli	r4,r3,1000		/* nanoseconds */
	/* Change r4 to be the number of ticks using:
	 * (nanoseconds + (timebase_period_ns - 1)) / timebase_period_ns
	 * timebase_period_ns defaults to 60 (16.6MHz) */
	lis	r5,timebase_period_ns@ha
	lwz	r5,timebase_period_ns@l(r5)
	add	r4,r4,r5
	addi	r4,r4,-1
	divw	r4,r4,r5		/* BUS ticks */
1:	mftbu	r5
	mftb	r6
	mftbu	r7
	cmpw	0,r5,r7
	bne	1b			/* Get [synced] base time */
	addc	r9,r6,r4		/* Compute end time */
	addze	r8,r5
2:	mftbu	r5
	cmpw	0,r5,r8
	blt	2b
	bgt	3f
	mftb	r6
	cmpw	0,r6,r9
	blt	2b
3:	blr
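
/* A C sketch of the non-601 path; mftb()/mftbu() stand in for the
 * move-from-timebase instructions and are not real helpers:
 *
 *	unsigned long ticks =			// rounded up
 *		(us * 1000 + timebase_period_ns - 1) / timebase_period_ns;
 *	unsigned long hi, lo;
 *
 *	do {					// re-read if the low word
 *		hi = mftbu();			// carried into the high word
 *		lo = mftb();			// between the two reads
 *	} while (mftbu() != hi);
 *
 * ...then spin until the 64-bit timebase passes (hi:lo) + ticks.
 */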

	.section ".relocate_code","xa"
/*
 * Flush and enable instruction cache
 * First, flush the data cache in case it was enabled and may be
 * holding instructions for copy back.
 */
_GLOBAL(flush_instruction_cache)
	mflr	r6
	bl	flush_data_cache

#ifdef CONFIG_8xx
	lis	r3, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r3
	lis	r3, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r3
	lis	r3, IDC_DISABLE@h
	mtspr	SPRN_DC_CST, r3
#elif defined(CONFIG_4xx)
	lis	r3,start@h		# r3 = &_start
	lis	r4,_etext@ha
	addi	r4,r4,_etext@l		# r4 = &_etext
1:	dcbf	r0,r3			# Flush the data cache
	icbi	r0,r3			# Invalidate the instruction cache
	addi	r3,r3,0x10		# Increment by one cache line
	cmplw	cr0,r3,r4		# Are we at the end yet?
	blt	1b			# No, keep flushing and invalidating
#else
	/* Enable, invalidate and then disable the L1 icache/dcache. */
	li	r3,0
	ori	r3,r3,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
	mfspr	r4,SPRN_HID0
	or	r5,r4,r3
	isync
	mtspr	SPRN_HID0,r5
	sync
	isync
	ori	r5,r4,HID0_ICE		/* Enable cache */
	mtspr	SPRN_HID0,r5
	sync
	isync
#endif
	mtlr	r6
	blr
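
/* The CONFIG_4xx path above walks the image one 16-byte cache line at a
 * time, roughly like this C sketch (dcbf()/icbi() standing in for the
 * instructions; the start is rounded down to a 64KB boundary because
 * lis only loads the high half of the address):
 *
 *	for (p = _start & ~0xffff; p < &_etext; p += 0x10) {
 *		dcbf(p);	// push any dirty data line to memory
 *		icbi(p);	// and invalidate the stale icache line
 *	}
 */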

#define NUM_CACHE_LINES		128*8
#define cache_flush_buffer	0x1000

/*
 * Flush data cache
 * Do this by just reading lots of stuff into the cache.
 */
_GLOBAL(flush_data_cache)
	lis	r3,cache_flush_buffer@h
	ori	r3,r3,cache_flush_buffer@l
	li	r4,NUM_CACHE_LINES
	mtctr	r4
00:	lwz	r4,0(r3)
	addi	r3,r3,L1_CACHE_BYTES	/* Next line, please */
	bdnz	00b
10:	blr
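
/* NUM_CACHE_LINES is 128*8 = 1024 lines, so the loads above sweep
 * 1024 * L1_CACHE_BYTES of memory (32KB with 32-byte lines), enough to
 * displace the previous contents of the L1 dcache.
 */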

	.previous
293