/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

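/*
 * Note on the macro above: li can only encode a 16-bit signed
 * immediate, so a MSR_KERNEL value with bits set above 0xffff
 * (e.g. a hypothetical 0x00029032) has to be built with the
 * two-instruction lis/ori pair instead.
 */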
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the old stack limit, then set the limit into the
	 * current stack region so the thread_info struct at the
	 * base of the stack stays protected
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the old stack limit, then set the limit into the
	 * current stack region so the thread_info struct at the
	 * base of the stack stays protected
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
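/*
 * Register conventions on entry, as set up by the exception prologs:
 * r11 points to the exception frame, r12 holds the interrupted NIP
 * (from SRR0 or the level-specific SRR pair) and r9 the interrupted
 * MSR.
 */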
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9, r9)
	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on
	 * the stack, and accessing CALLER_ADDR1 will cause an oops, so
	 * we need to create a dummy stack frame to make
	 * trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be
	 * passed along as well.  r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point (they aren't
	 * syscall arguments); the rest is restored from the exception
	 * frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts.  You aren't supposed to call a syscall with
	 * interrupts disabled in the first place.  However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
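	/*
	 * Dispatch: sys_call_table holds one 32-bit function pointer
	 * per syscall, so the syscall number in r0 is scaled by 4
	 * (slwi) before being used as an index; out-of-range numbers
	 * branch to 66f below and return -ENOSYS.
	 */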
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
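	/*
	 * PPC error return convention: a return value that is unsigned
	 * >= -MAX_ERRNO denotes failure, so it is negated into a
	 * positive errno and the CR0.SO bit is set for userspace to
	 * test; anything else is a plain success value.
	 */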
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	andi.	r4,r8,MSR_PR
	beq	3f
	CURRENT_THREAD_INFO(r4, r1)
	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

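	/*
	 * lwarx/stwcx. retry loop: TI_FLAGS is updated atomically so
	 * that a concurrent flag update from another CPU is not lost;
	 * if the reservation is broken, stwcx. fails and we retry.
	 */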
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
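	/*
	 * Build the MSR value to save: FP (and AltiVec/SPE where
	 * configured) get disabled across the switch, so the next time
	 * either task touches that state it takes an unavailable
	 * exception and the registers are reloaded lazily.
	 */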
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

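/*
 * Fast exception exit: used by handlers that saved only the volatile
 * registers.  Everything is restored straight from the frame at r11
 * and we return via SRR0/SRR1 without going through ret_from_except.
 */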
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info()->flags for _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f
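	/*
	 * An emulated stwu to the kernel stack (e.g. from a kprobe)
	 * had its store deferred; replay it here by building the frame
	 * the stwu would have created: copy the exception frame down
	 * to the new r1, then complete the back-chain store.
	 */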

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such.  However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not.  So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked.  Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

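/*
 * Common return path for the extra exception levels (critical, debug,
 * machine check): restore the full register set and return with the
 * level-specific SRR pair and RFI variant supplied by the caller.
 */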
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  On entry, r0
 * holds the DBCR0 value to set.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
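	/*
	 * global_dbcr0 is laid out as two words per CPU: the saved
	 * global DBCR0 value at offset 0 and a count at offset 4,
	 * which is incremented here and decremented again when the
	 * saved value is reloaded in transfer_to_handler.
	 */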
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: Nor do we tell lockdep that we are disabling them
	 * again here.  These disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
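/*
 * Note: SPRG_RTAS is loaded with the physical stack pointer for the
 * duration of the call so the machine check code can tell that we
 * were inside RTAS (see machine_check_in_rtas below); it is cleared
 * again on the way out.
 */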
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register.  But we have r0 to play with.  We use r0
	 * to move mcount's return address into the ctr register,
	 * restore the original link register from the stack, and
	 * then jump back using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif
EXPORT_SYMBOL(_mcount)

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
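	/*
	 * The hard-coded offsets below depend on the frame layout of
	 * MCOUNT_SAVE_FRAME, which is still live at this point:
	 * 44(r1) holds our return address into the instrumented
	 * function, and 52(r1) reaches the LR save slot of that
	 * function's own stack frame.
	 */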
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	lwz	r3,52(r1)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
	stw	r3,52(r1)

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */