blob: 38f0a75014eb0eba97e0cb9f7bdd3ca01ab3d8bb [file] [log] [blame]
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
20
Paul Mackerras9994a332005-10-10 22:36:14 +100021#include <linux/errno.h>
Michael Ellermanc3525942015-07-23 20:21:01 +100022#include <linux/err.h>
Michael Ellerman85baa092016-03-24 22:04:05 +110023#include <linux/magic.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100024#include <asm/unistd.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/thread_info.h>
Michael Ellerman98f6ded2019-04-11 21:46:15 +100029#include <asm/code-patching-asm.h>
Paul Mackerras9994a332005-10-10 22:36:14 +100030#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/cputable.h>
Stephen Rothwell3f639ee2006-09-25 18:19:00 +100033#include <asm/firmware.h>
David Woodhouse007d88d2007-01-01 18:45:34 +000034#include <asm/bug.h>
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100035#include <asm/ptrace.h>
Benjamin Herrenschmidt945feb12008-04-17 14:35:01 +100036#include <asm/irqflags.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053037#include <asm/ftrace.h>
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +110038#include <asm/hw_irq.h>
Li Zhong5d1c5742013-05-13 16:16:43 +000039#include <asm/context_tracking.h>
Sam bobroffb4b56f92015-06-12 11:06:32 +100040#include <asm/tm.h>
Chris Smart8a649042016-04-26 10:28:50 +100041#include <asm/ppc-opcode.h>
Michael Ellerman86dfa512019-04-11 21:46:03 +100042#include <asm/barrier.h>
Al Viro9445aa12016-01-13 23:33:46 -050043#include <asm/export.h>
Nicholas Piggine41b3b82018-01-10 03:07:15 +110044#ifdef CONFIG_PPC_BOOK3S
45#include <asm/exception-64s.h>
46#else
47#include <asm/exception-64e.h>
48#endif
Paul Mackerras9994a332005-10-10 22:36:14 +100049
50/*
51 * System calls.
52 */
53 .section ".toc","aw"
Anton Blanchardc857c432014-02-04 16:05:53 +110054SYS_CALL_TABLE:
55 .tc sys_call_table[TC],sys_call_table
Paul Mackerras9994a332005-10-10 22:36:14 +100056
57/* This value is used to mark exception frames on the stack. */
58exception_marker:
Benjamin Herrenschmidtec2b36b2008-04-17 14:34:59 +100059 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
Paul Mackerras9994a332005-10-10 22:36:14 +100060
61 .section ".text"
62 .align 7
63
Paul Mackerras9994a332005-10-10 22:36:14 +100064 .globl system_call_common
65system_call_common:
Sam bobroffb4b56f92015-06-12 11:06:32 +100066#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
67BEGIN_FTR_SECTION
68 extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
69 bne tabort_syscall
70END_FTR_SECTION_IFSET(CPU_FTR_TM)
71#endif
Paul Mackerras9994a332005-10-10 22:36:14 +100072 andi. r10,r12,MSR_PR
73 mr r10,r1
74 addi r1,r1,-INT_FRAME_SIZE
75 beq- 1f
76 ld r1,PACAKSAVE(r13)
771: std r10,0(r1)
78 std r11,_NIP(r1)
79 std r12,_MSR(r1)
80 std r0,GPR0(r1)
81 std r10,GPR1(r1)
Haren Myneni5d75b262012-12-06 21:46:37 +000082 beq 2f /* if from kernel mode */
Diana Craciundd8bf942019-04-11 21:46:23 +100083#ifdef CONFIG_PPC_FSL_BOOK3E
84START_BTB_FLUSH_SECTION
85 BTB_FLUSH(r10)
86END_BTB_FLUSH_SECTION
87#endif
Christophe Leroyc223c902016-05-17 08:33:46 +020088 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
Haren Myneni5d75b262012-12-06 21:46:37 +0000892: std r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +100090 std r3,GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +000091 mfcr r2
Paul Mackerras9994a332005-10-10 22:36:14 +100092 std r4,GPR4(r1)
93 std r5,GPR5(r1)
94 std r6,GPR6(r1)
95 std r7,GPR7(r1)
96 std r8,GPR8(r1)
97 li r11,0
98 std r11,GPR9(r1)
99 std r11,GPR10(r1)
100 std r11,GPR11(r1)
101 std r11,GPR12(r1)
Anton Blanchard823df432012-04-04 18:24:29 +0000102 std r11,_XER(r1)
Anton Blanchard82087412012-04-04 18:26:39 +0000103 std r11,_CTR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000104 std r9,GPR13(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000105 mflr r10
Anton Blanchardfd6c40f2012-04-05 03:44:48 +0000106 /*
107 * This clears CR0.SO (bit 28), which is the error indication on
108 * return from this system call.
109 */
110 rldimi r2,r11,28,(63-28)
Paul Mackerras9994a332005-10-10 22:36:14 +1000111 li r11,0xc01
Paul Mackerras9994a332005-10-10 22:36:14 +1000112 std r10,_LINK(r1)
113 std r11,_TRAP(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000114 std r3,ORIG_GPR3(r1)
Anton Blanchardfd6c40f2012-04-05 03:44:48 +0000115 std r2,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000116 ld r2,PACATOC(r13)
117 addi r9,r1,STACK_FRAME_OVERHEAD
118 ld r11,exception_marker@toc(r2)
119 std r11,-16(r9) /* "regshere" marker */
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +0200120#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000121BEGIN_FW_FTR_SECTION
122 beq 33f
123 /* if from user, see if there are any DTL entries to process */
124 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
125 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000126 addi r10,r10,LPPACA_DTLIDX
127 LDX_BE r10,0,r10 /* get log write index */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000128 cmpd cr1,r11,r10
129 beq+ cr1,33f
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100130 bl accumulate_stolen_time
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000131 REST_GPR(0,r1)
132 REST_4GPRS(3,r1)
133 REST_2GPRS(7,r1)
134 addi r9,r1,STACK_FRAME_OVERHEAD
13533:
136END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
Frederic Weisbeckerabf917c2012-07-25 07:56:04 +0200137#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
Paul Mackerrascf9efce2010-08-26 19:56:43 +0000138
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100139 /*
140 * A syscall should always be called with interrupts enabled
141 * so we just unconditionally hard-enable here. When some kind
142 * of irq tracing is used, we additionally check that condition
143 * is correct
144 */
145#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
146 lbz r10,PACASOFTIRQEN(r13)
147 xori r10,r10,1
1481: tdnei r10,0
149 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
150#endif
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000151
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000152#ifdef CONFIG_PPC_BOOK3E
153 wrteei 1
154#else
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000155 li r11,MSR_RI
Paul Mackerras9994a332005-10-10 22:36:14 +1000156 ori r11,r11,MSR_EE
157 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000158#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +1000159
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100160 /* We do need to set SOFTE in the stack frame or the return
161 * from interrupt will be painful
162 */
163 li r10,1
164 std r10,SOFTE(r1)
165
Stuart Yoder9778b692012-07-05 04:41:35 +0000166 CURRENT_THREAD_INFO(r11, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000167 ld r10,TI_FLAGS(r11)
Michael Ellerman10ea8342015-01-15 12:01:42 +1100168 andi. r11,r10,_TIF_SYSCALL_DOTRACE
Michael Ellermand3837412015-07-23 20:21:02 +1000169 bne syscall_dotrace /* does not return */
Paul Mackerras9994a332005-10-10 22:36:14 +1000170 cmpldi 0,r0,NR_syscalls
171 bge- syscall_enosys
172
173system_call: /* label this so stack traces look sane */
174/*
175 * Need to vector to 32 Bit or default sys_call_table here,
176 * based on caller's run-mode / personality.
177 */
Anton Blanchardc857c432014-02-04 16:05:53 +1100178 ld r11,SYS_CALL_TABLE@toc(2)
Paul Mackerras9994a332005-10-10 22:36:14 +1000179 andi. r10,r10,_TIF_32BIT
180 beq 15f
181 addi r11,r11,8 /* use 32-bit syscall entries */
182 clrldi r3,r3,32
183 clrldi r4,r4,32
184 clrldi r5,r5,32
185 clrldi r6,r6,32
186 clrldi r7,r7,32
187 clrldi r8,r8,32
18815:
189 slwi r0,r0,4
Michael Ellerman86dfa512019-04-11 21:46:03 +1000190
191 barrier_nospec_asm
192 /*
193 * Prevent the load of the handler below (based on the user-passed
194 * system call number) being speculatively executed until the test
195 * against NR_syscalls and branch to .Lsyscall_enosys above has
196 * committed.
197 */
198
Anton Blanchardcc7efbf2014-02-04 16:07:47 +1100199 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
200 mtctr r12
Paul Mackerras9994a332005-10-10 22:36:14 +1000201 bctrl /* Call handler */
202
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100203.Lsyscall_exit:
Paul Mackerras9994a332005-10-10 22:36:14 +1000204 std r3,RESULT(r1)
Stuart Yoder9778b692012-07-05 04:41:35 +0000205 CURRENT_THREAD_INFO(r12, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000206
Paul Mackerras9994a332005-10-10 22:36:14 +1000207 ld r8,_MSR(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000208#ifdef CONFIG_PPC_BOOK3S
209 /* No MSR:RI on BookE */
Paul Mackerras9994a332005-10-10 22:36:14 +1000210 andi. r10,r8,MSR_RI
211 beq- unrecov_restore
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000212#endif
Benjamin Herrenschmidt1421ae02012-03-01 15:40:23 +1100213 /*
214 * Disable interrupts so current_thread_info()->flags can't change,
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000215 * and so that we don't get interrupted after loading SRR0/1.
216 */
217#ifdef CONFIG_PPC_BOOK3E
218 wrteei 0
219#else
Anton Blanchardac1dc362012-05-29 12:22:00 +0000220 /*
221 * For performance reasons we clear RI the same time that we
222 * clear EE. We only need to clear RI just before we restore r13
223 * below, but batching it with EE saves us one expensive mtmsrd call.
224 * We have to be careful to restore RI if we branch anywhere from
225 * here (eg syscall_exit_work).
226 */
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000227 li r11,0
Anton Blanchardac1dc362012-05-29 12:22:00 +0000228 mtmsrd r11,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000229#endif /* CONFIG_PPC_BOOK3E */
230
Paul Mackerras9994a332005-10-10 22:36:14 +1000231 ld r9,TI_FLAGS(r12)
Michael Ellermanc3525942015-07-23 20:21:01 +1000232 li r11,-MAX_ERRNO
Michael Ellerman10ea8342015-01-15 12:01:42 +1100233 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
Paul Mackerras9994a332005-10-10 22:36:14 +1000234 bne- syscall_exit_work
Cyril Bur70fe3d92016-02-29 17:53:47 +1100235
236 andi. r0,r8,MSR_FP
237 beq 2f
238#ifdef CONFIG_ALTIVEC
239 andis. r0,r8,MSR_VEC@h
240 bne 3f
241#endif
2422: addi r3,r1,STACK_FRAME_OVERHEAD
Cyril Bur6e669f02016-03-16 13:29:30 +1100243#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000244 li r10,MSR_RI
Cyril Bur6e669f02016-03-16 13:29:30 +1100245 mtmsrd r10,1 /* Restore RI */
246#endif
Cyril Bur70fe3d92016-02-29 17:53:47 +1100247 bl restore_math
Cyril Bur6e669f02016-03-16 13:29:30 +1100248#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000249 li r11,0
Cyril Bur6e669f02016-03-16 13:29:30 +1100250 mtmsrd r11,1
251#endif
Cyril Bur70fe3d92016-02-29 17:53:47 +1100252 ld r8,_MSR(r1)
253 ld r3,RESULT(r1)
254 li r11,-MAX_ERRNO
255
2563: cmpld r3,r11
David Woodhouse401d1f02005-11-15 18:52:18 +0000257 ld r5,_CCR(r1)
258 bge- syscall_error
Anton Blanchardd14299d2012-04-04 18:23:27 +0000259.Lsyscall_error_cont:
Paul Mackerras9994a332005-10-10 22:36:14 +1000260 ld r7,_NIP(r1)
Anton Blanchardf89451f2010-08-11 01:40:27 +0000261BEGIN_FTR_SECTION
Paul Mackerras9994a332005-10-10 22:36:14 +1000262 stdcx. r0,0,r1 /* to clear the reservation */
Anton Blanchardf89451f2010-08-11 01:40:27 +0000263END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
Paul Mackerras9994a332005-10-10 22:36:14 +1000264 andi. r6,r8,MSR_PR
265 ld r4,_LINK(r1)
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000266
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100267 beq- 1f
Christophe Leroyc223c902016-05-17 08:33:46 +0200268 ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
Michael Ellermand030a4b2015-11-25 14:25:17 +1100269
270BEGIN_FTR_SECTION
271 HMT_MEDIUM_LOW
272END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
273
Paul Mackerrasc6622f62006-02-24 10:06:59 +1100274 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
Nicholas Piggin00e40622018-01-10 03:07:15 +1100275 ld r2,GPR2(r1)
276 ld r1,GPR1(r1)
277 mtlr r4
278 mtcr r5
279 mtspr SPRN_SRR0,r7
280 mtspr SPRN_SRR1,r8
281 RFI_TO_USER
282 b . /* prevent speculative execution */
283
284 /* exit to kernel */
Paul Mackerras9994a332005-10-10 22:36:14 +10002851: ld r2,GPR2(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000286 ld r1,GPR1(r1)
287 mtlr r4
288 mtcr r5
289 mtspr SPRN_SRR0,r7
290 mtspr SPRN_SRR1,r8
Nicholas Piggin00e40622018-01-10 03:07:15 +1100291 RFI_TO_KERNEL
Paul Mackerras9994a332005-10-10 22:36:14 +1000292 b . /* prevent speculative execution */
293
David Woodhouse401d1f02005-11-15 18:52:18 +0000294syscall_error:
Paul Mackerras9994a332005-10-10 22:36:14 +1000295 oris r5,r5,0x1000 /* Set SO bit in CR */
David Woodhouse401d1f02005-11-15 18:52:18 +0000296 neg r3,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000297 std r5,_CCR(r1)
Anton Blanchardd14299d2012-04-04 18:23:27 +0000298 b .Lsyscall_error_cont
David Woodhouse401d1f02005-11-15 18:52:18 +0000299
Paul Mackerras9994a332005-10-10 22:36:14 +1000300/* Traced system call support */
301syscall_dotrace:
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100302 bl save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000303 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100304 bl do_syscall_trace_enter
Michael Ellermand3837412015-07-23 20:21:02 +1000305
Roland McGrath4f72c422008-07-27 16:51:03 +1000306 /*
Michael Ellermand3837412015-07-23 20:21:02 +1000307 * We use the return value of do_syscall_trace_enter() as the syscall
308 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
309 * returns an invalid syscall number and the test below against
310 * NR_syscalls will fail.
Roland McGrath4f72c422008-07-27 16:51:03 +1000311 */
312 mr r0,r3
Michael Ellermand3837412015-07-23 20:21:02 +1000313
314 /* Restore argument registers just clobbered and/or possibly changed. */
Paul Mackerras9994a332005-10-10 22:36:14 +1000315 ld r3,GPR3(r1)
316 ld r4,GPR4(r1)
317 ld r5,GPR5(r1)
318 ld r6,GPR6(r1)
319 ld r7,GPR7(r1)
320 ld r8,GPR8(r1)
Michael Ellermand3837412015-07-23 20:21:02 +1000321
322 /* Repopulate r9 and r10 for the system_call path */
Paul Mackerras9994a332005-10-10 22:36:14 +1000323 addi r9,r1,STACK_FRAME_OVERHEAD
Stuart Yoder9778b692012-07-05 04:41:35 +0000324 CURRENT_THREAD_INFO(r10, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +1000325 ld r10,TI_FLAGS(r10)
Michael Ellermand3837412015-07-23 20:21:02 +1000326
327 cmpldi r0,NR_syscalls
328 blt+ system_call
329
330 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
331 b .Lsyscall_exit
332
Paul Mackerras9994a332005-10-10 22:36:14 +1000333
David Woodhouse401d1f02005-11-15 18:52:18 +0000334syscall_enosys:
335 li r3,-ENOSYS
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100336 b .Lsyscall_exit
David Woodhouse401d1f02005-11-15 18:52:18 +0000337
338syscall_exit_work:
Anton Blanchardac1dc362012-05-29 12:22:00 +0000339#ifdef CONFIG_PPC_BOOK3S
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000340 li r10,MSR_RI
Anton Blanchardac1dc362012-05-29 12:22:00 +0000341 mtmsrd r10,1 /* Restore RI */
342#endif
David Woodhouse401d1f02005-11-15 18:52:18 +0000343 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
344 If TIF_NOERROR is set, just save r3 as it is. */
345
346 andi. r0,r9,_TIF_RESTOREALL
Paul Mackerras1bd79332006-03-08 13:24:22 +1100347 beq+ 0f
348 REST_NVGPRS(r1)
349 b 2f
Michael Ellermanc3525942015-07-23 20:21:01 +10003500: cmpld r3,r11 /* r11 is -MAX_ERRNO */
David Woodhouse401d1f02005-11-15 18:52:18 +0000351 blt+ 1f
352 andi. r0,r9,_TIF_NOERROR
353 bne- 1f
354 ld r5,_CCR(r1)
355 neg r3,r3
356 oris r5,r5,0x1000 /* Set SO bit in CR */
357 std r5,_CCR(r1)
3581: std r3,GPR3(r1)
3592: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
360 beq 4f
361
Paul Mackerras1bd79332006-03-08 13:24:22 +1100362 /* Clear per-syscall TIF flags if any are set. */
David Woodhouse401d1f02005-11-15 18:52:18 +0000363
364 li r11,_TIF_PERSYSCALL_MASK
365 addi r12,r12,TI_FLAGS
3663: ldarx r10,0,r12
367 andc r10,r10,r11
368 stdcx. r10,0,r12
369 bne- 3b
370 subi r12,r12,TI_FLAGS
Paul Mackerras1bd79332006-03-08 13:24:22 +1100371
3724: /* Anything else left to do? */
Michael Ellermand8725ce2015-11-25 14:25:18 +1100373BEGIN_FTR_SECTION
374 lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
375 ld r10,PACACURRENT(r13)
376 sldi r3,r3,32 /* bits 11-13 are used for ppr */
377 std r3,TASKTHREADPPR(r10)
378END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
379
Michael Ellerman10ea8342015-01-15 12:01:42 +1100380 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100381 beq ret_from_except_lite
David Woodhouse401d1f02005-11-15 18:52:18 +0000382
383 /* Re-enable interrupts */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000384#ifdef CONFIG_PPC_BOOK3E
385 wrteei 1
386#else
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000387 li r10,MSR_RI
David Woodhouse401d1f02005-11-15 18:52:18 +0000388 ori r10,r10,MSR_EE
389 mtmsrd r10,1
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000390#endif /* CONFIG_PPC_BOOK3E */
David Woodhouse401d1f02005-11-15 18:52:18 +0000391
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100392 bl save_nvgprs
Paul Mackerras9994a332005-10-10 22:36:14 +1000393 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100394 bl do_syscall_trace_leave
395 b ret_from_except
Paul Mackerras9994a332005-10-10 22:36:14 +1000396
Sam bobroffb4b56f92015-06-12 11:06:32 +1000397#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
398tabort_syscall:
399 /* Firstly we need to enable TM in the kernel */
400 mfmsr r10
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000401 li r9, 1
402 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
Sam bobroffb4b56f92015-06-12 11:06:32 +1000403 mtmsrd r10, 0
404
405 /* tabort, this dooms the transaction, nothing else */
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000406 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
407 TABORT(R9)
Sam bobroffb4b56f92015-06-12 11:06:32 +1000408
409 /*
410 * Return directly to userspace. We have corrupted user register state,
411 * but userspace will never see that register state. Execution will
412 * resume after the tbegin of the aborted transaction with the
413 * checkpointed register state.
414 */
Nicholas Piggincc7786d2016-07-25 14:26:51 +1000415 li r9, MSR_RI
416 andc r10, r10, r9
Sam bobroffb4b56f92015-06-12 11:06:32 +1000417 mtmsrd r10, 1
418 mtspr SPRN_SRR0, r11
419 mtspr SPRN_SRR1, r12
Nicholas Pigginefe8bc02018-02-22 23:35:44 +1100420 RFI_TO_USER
Sam bobroffb4b56f92015-06-12 11:06:32 +1000421 b . /* prevent speculative execution */
422#endif
423
Paul Mackerras9994a332005-10-10 22:36:14 +1000424/* Save non-volatile GPRs, if not already saved. */
425_GLOBAL(save_nvgprs)
426 ld r11,_TRAP(r1)
427 andi. r0,r11,1
428 beqlr-
429 SAVE_NVGPRS(r1)
430 clrrdi r0,r11,1
431 std r0,_TRAP(r1)
432 blr
433
David Woodhouse401d1f02005-11-15 18:52:18 +0000434
Paul Mackerras9994a332005-10-10 22:36:14 +1000435/*
436 * The sigsuspend and rt_sigsuspend system calls can call do_signal
437 * and thus put the process into the stopped state where we might
438 * want to examine its user state with ptrace. Therefore we need
439 * to save all the nonvolatile registers (r14 - r31) before calling
440 * the C code. Similarly, fork, vfork and clone need the full
441 * register state on the stack so that it can be copied to the child.
442 */
Paul Mackerras9994a332005-10-10 22:36:14 +1000443
444_GLOBAL(ppc_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100445 bl save_nvgprs
446 bl sys_fork
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100447 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000448
449_GLOBAL(ppc_vfork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100450 bl save_nvgprs
451 bl sys_vfork
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100452 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000453
454_GLOBAL(ppc_clone)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100455 bl save_nvgprs
456 bl sys_clone
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100457 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000458
Paul Mackerras1bd79332006-03-08 13:24:22 +1100459_GLOBAL(ppc32_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100460 bl save_nvgprs
461 bl compat_sys_swapcontext
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100462 b .Lsyscall_exit
Paul Mackerras1bd79332006-03-08 13:24:22 +1100463
464_GLOBAL(ppc64_swapcontext)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100465 bl save_nvgprs
466 bl sys_swapcontext
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100467 b .Lsyscall_exit
Paul Mackerras1bd79332006-03-08 13:24:22 +1100468
Michael Ellerman529d2352015-03-28 21:35:16 +1100469_GLOBAL(ppc_switch_endian)
470 bl save_nvgprs
471 bl sys_switch_endian
472 b .Lsyscall_exit
473
Paul Mackerras9994a332005-10-10 22:36:14 +1000474_GLOBAL(ret_from_fork)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100475 bl schedule_tail
Paul Mackerras9994a332005-10-10 22:36:14 +1000476 REST_NVGPRS(r1)
477 li r3,0
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100478 b .Lsyscall_exit
Paul Mackerras9994a332005-10-10 22:36:14 +1000479
Al Viro58254e12012-09-12 18:32:42 -0400480_GLOBAL(ret_from_kernel_thread)
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100481 bl schedule_tail
Al Viro58254e12012-09-12 18:32:42 -0400482 REST_NVGPRS(r1)
Al Viro58254e12012-09-12 18:32:42 -0400483 mtlr r14
484 mr r3,r15
Michael Ellermanf55d9662016-06-06 22:26:10 +0530485#ifdef PPC64_ELF_ABI_v2
Anton Blanchard7cedd602014-02-04 16:08:51 +1100486 mr r12,r14
487#endif
Al Viro58254e12012-09-12 18:32:42 -0400488 blrl
489 li r3,0
Michael Ellerman4c3b2162014-12-05 21:16:59 +1100490 b .Lsyscall_exit
Al Virobe6abfa2012-08-31 15:48:05 -0400491
Michael Ellerman98f6ded2019-04-11 21:46:15 +1000492#ifdef CONFIG_PPC_BOOK3S_64
493
494#define FLUSH_COUNT_CACHE \
4951: nop; \
496 patch_site 1b, patch__call_flush_count_cache
497
498
499#define BCCTR_FLUSH .long 0x4c400420
500
501.macro nops number
502 .rept \number
503 nop
504 .endr
505.endm
506
507.balign 32
508.global flush_count_cache
509flush_count_cache:
510 /* Save LR into r9 */
511 mflr r9
512
Michael Ellerman113408c2019-11-13 21:05:41 +1100513 // Flush the link stack
Michael Ellerman98f6ded2019-04-11 21:46:15 +1000514 .rept 64
515 bl .+4
516 .endr
517 b 1f
518 nops 6
519
520 .balign 32
521 /* Restore LR */
5221: mtlr r9
Michael Ellerman113408c2019-11-13 21:05:41 +1100523
524 // If we're just flushing the link stack, return here
5253: nop
526 patch_site 3b patch__flush_link_stack_return
527
Michael Ellerman98f6ded2019-04-11 21:46:15 +1000528 li r9,0x7fff
529 mtctr r9
530
531 BCCTR_FLUSH
532
5332: nop
534 patch_site 2b patch__flush_count_cache_return
535
536 nops 3
537
538 .rept 278
539 .balign 32
540 BCCTR_FLUSH
541 nops 7
542 .endr
543
544 blr
545#else
546#define FLUSH_COUNT_CACHE
547#endif /* CONFIG_PPC_BOOK3S_64 */
548
Paul Mackerras9994a332005-10-10 22:36:14 +1000549/*
550 * This routine switches between two different tasks. The process
551 * state of one is saved on its kernel stack. Then the state
552 * of the other is restored from its kernel stack. The memory
553 * management hardware is updated to the second process's state.
554 * Finally, we can return to the second process, via ret_from_except.
555 * On entry, r3 points to the THREAD for the current task, r4
556 * points to the THREAD for the new task.
557 *
558 * Note: there are two ways to get to the "going out" portion
559 * of this code; either by coming in via the entry (_switch)
560 * or via "fork" which must set up an environment equivalent
561 * to the "_switch" path. If you change this you'll have to change
562 * the fork code also.
563 *
564 * The code which creates the new task context is in 'copy_thread'
Jon Mason2ef94812006-01-23 10:58:20 -0600565 * in arch/powerpc/kernel/process.c
Paul Mackerras9994a332005-10-10 22:36:14 +1000566 */
567 .align 7
568_GLOBAL(_switch)
569 mflr r0
570 std r0,16(r1)
571 stdu r1,-SWITCH_FRAME_SIZE(r1)
572 /* r3-r13 are caller saved -- Cort */
573 SAVE_8GPRS(14, r1)
574 SAVE_10GPRS(22, r1)
Anton Blanchard68bfa962015-10-29 11:43:56 +1100575 std r0,_NIP(r1) /* Return to switch caller */
Paul Mackerras9994a332005-10-10 22:36:14 +1000576 mfcr r23
577 std r23,_CCR(r1)
578 std r1,KSP(r3) /* Set old stack pointer */
579
Michael Ellerman98f6ded2019-04-11 21:46:15 +1000580 FLUSH_COUNT_CACHE
581
Paul Mackerras9994a332005-10-10 22:36:14 +1000582#ifdef CONFIG_SMP
583 /* We need a sync somewhere here to make sure that if the
584 * previous task gets rescheduled on another CPU, it sees all
585 * stores it has performed on this one.
586 */
587 sync
588#endif /* CONFIG_SMP */
589
Anton Blanchardf89451f2010-08-11 01:40:27 +0000590 /*
591 * If we optimise away the clear of the reservation in system
592 * calls because we know the CPU tracks the address of the
593 * reservation, then we need to clear it here to cover the
594 * case that the kernel context switch path has no larx
595 * instructions.
596 */
597BEGIN_FTR_SECTION
598 ldarx r6,0,r1
599END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
600
Chris Smart8a649042016-04-26 10:28:50 +1000601BEGIN_FTR_SECTION
602/*
603 * A cp_abort (copy paste abort) here ensures that when context switching, a
604 * copy from one process can't leak into the paste of another.
605 */
606 PPC_CP_ABORT
607END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
608
Michael Neulinga5153482013-05-29 19:34:27 +0000609#ifdef CONFIG_PPC_BOOK3S
610/* Cancel all explict user streams as they will have no use after context
611 * switch and will stop the HW from creating streams itself
612 */
613 DCBT_STOP_ALL_STREAM_IDS(r6)
614#endif
615
Paul Mackerras9994a332005-10-10 22:36:14 +1000616 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
617 std r6,PACACURRENT(r13) /* Set new 'current' */
618
619 ld r8,KSP(r4) /* new stack pointer */
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +1000620#ifdef CONFIG_PPC_STD_MMU_64
621BEGIN_MMU_FTR_SECTION
622 b 2f
Aneesh Kumar K.V5a25b6f2016-07-27 13:19:01 +1000623END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
Paul Mackerras9994a332005-10-10 22:36:14 +1000624BEGIN_FTR_SECTION
625 clrrdi r6,r8,28 /* get its ESID */
626 clrrdi r9,r1,28 /* get current sp ESID */
Michael Ellerman13b3d132014-07-10 12:29:20 +1000627FTR_SECTION_ELSE
Paul Mackerras1189be62007-10-11 20:37:10 +1000628 clrrdi r6,r8,40 /* get its 1T ESID */
629 clrrdi r9,r1,40 /* get current sp 1T ESID */
Michael Ellerman13b3d132014-07-10 12:29:20 +1000630ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
Paul Mackerras9994a332005-10-10 22:36:14 +1000631 clrldi. r0,r6,2 /* is new ESID c00000000? */
632 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
633 cror eq,4*cr1+eq,eq
634 beq 2f /* if yes, don't slbie it */
635
636 /* Bolt in the new stack SLB entry */
637 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
638 oris r0,r6,(SLB_ESID_V)@h
639 ori r0,r0,(SLB_NUM_BOLTED-1)@l
Paul Mackerras1189be62007-10-11 20:37:10 +1000640BEGIN_FTR_SECTION
641 li r9,MMU_SEGSIZE_1T /* insert B field */
642 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
643 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
Matt Evans44ae3ab2011-04-06 19:48:50 +0000644END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
Michael Neuling2f6093c2006-08-07 16:19:19 +1000645
Michael Neuling00efee72007-08-24 16:58:37 +1000646 /* Update the last bolted SLB. No write barriers are needed
647 * here, provided we only update the current CPU's SLB shadow
648 * buffer.
649 */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000650 ld r9,PACA_SLBSHADOWPTR(r13)
Michael Neuling11a27ad2006-08-09 17:00:30 +1000651 li r12,0
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +1000652 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
653 li r12,SLBSHADOW_STACKVSID
654 STDX_BE r7,r12,r9 /* Save VSID */
655 li r12,SLBSHADOW_STACKESID
656 STDX_BE r0,r12,r9 /* Save ESID */
Michael Neuling2f6093c2006-08-07 16:19:19 +1000657
Matt Evans44ae3ab2011-04-06 19:48:50 +0000658 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
Olof Johanssonf66bce52007-10-16 00:58:59 +1000659 * we have 1TB segments, the only CPUs known to have the errata
660 * only support less than 1TB of system memory and we'll never
661 * actually hit this code path.
662 */
663
Aneesh Kumar K.V10e46042018-05-30 18:48:04 +0530664 isync
Paul Mackerras9994a332005-10-10 22:36:14 +1000665 slbie r6
666 slbie r6 /* Workaround POWER5 < DD2.1 issue */
667 slbmte r7,r0
668 isync
Paul Mackerras9994a332005-10-10 22:36:14 +10006692:
Aneesh Kumar K.Vcaca2852016-04-29 23:26:07 +1000670#endif /* CONFIG_PPC_STD_MMU_64 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000671
Stuart Yoder9778b692012-07-05 04:41:35 +0000672 CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
Paul Mackerras9994a332005-10-10 22:36:14 +1000673 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
674 because we don't need to leave the 288-byte ABI gap at the
675 top of the kernel stack. */
676 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
677
678 mr r1,r8 /* start using new stack pointer */
679 std r7,PACAKSAVE(r13)
680
Anton Blanchard71433282012-09-03 16:51:10 +0000681 ld r6,_CCR(r1)
682 mtcrf 0xFF,r6
683
Paul Mackerras9994a332005-10-10 22:36:14 +1000684 /* r3-r13 are destroyed -- Cort */
685 REST_8GPRS(14, r1)
686 REST_10GPRS(22, r1)
687
688 /* convert old thread to its task_struct for return value */
689 addi r3,r3,-THREAD
690 ld r7,_NIP(r1) /* Return to _switch caller in new task */
691 mtlr r7
692 addi r1,r1,SWITCH_FRAME_SIZE
693 blr
694
695 .align 7
696_GLOBAL(ret_from_except)
697 ld r11,_TRAP(r1)
698 andi. r0,r11,1
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100699 bne ret_from_except_lite
Paul Mackerras9994a332005-10-10 22:36:14 +1000700 REST_NVGPRS(r1)
701
_GLOBAL(ret_from_except_lite)
	/*
	 * Exception exit without non-volatile GPR restore.
	 *
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0			/* BookE: clear MSR[EE] directly */
#else
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Update machine state: EE off, RI kept */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)	/* r9 = thread_info */
	ld	r3,_MSR(r1)		/* r3 = MSR we are returning to */
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)	/* r10 = current task (for DBCR0 below) */
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)		/* r4 = thread_info->flags */
	andi.	r3,r3,MSR_PR		/* returning to user or kernel? */
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Returning to userspace: check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f			/* pending work (resched/signal/...) */
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE		/* Clear MSR.DE while poking debug regs */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10		/* clear all debug-event status bits */
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math		/* restore FP/VEC/VSX state if needed */
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts	/* soft-enable before scheduling */
	SCHEDULE_USER
	b	ret_from_except_lite	/* re-check flags from the top */
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f			/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs		/* do_notify_resume needs a full frame */
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume	/* handle signals etc. */
	b	ret_from_except
Tiejun Chenc58ce2b2012-06-06 20:56:43 +0000774
resume_kernel:
	/*
	 * Return to kernel context. First complete any emulated stdu
	 * (stack store) that a single-stepped/probed instruction left
	 * pending: check current_thread_info, _TIF_EMULATE_STACK_STORE.
	 */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE (in doublewords) */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)		/* store back-chain as the stdu would have */

	/* Clear _TIF_EMULATE_STACK_STORE flag atomically (flags may be
	 * updated concurrently, hence the ldarx/stdcx. loop) */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)		/* soft-enable state we are returning to */
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq		/* eq = (preempt_count==0) && (SOFTE!=0) */
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
Paul Mackerras9994a332005-10-10 22:36:14 +1000847
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100848 .globl fast_exc_return_irq
849fast_exc_return_irq:
Paul Mackerras9994a332005-10-10 22:36:14 +1000850restore:
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100851 /*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000852 * This is the main kernel exit path. First we check if we
853 * are about to re-enable interrupts
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100854 */
Michael Ellerman01f3880d2008-07-16 14:21:34 +1000855 ld r5,SOFTE(r1)
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100856 lbz r6,PACASOFTIRQEN(r13)
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000857 cmpwi cr0,r5,0
858 beq restore_irq_off
Paul Mackerras9994a332005-10-10 22:36:14 +1000859
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000860 /* We are enabling, were we already enabled ? Yes, just return */
861 cmpwi cr0,r6,1
862 beq cr0,do_restore
Paul Mackerrasb0a779d2006-10-18 10:11:22 +1000863
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000864 /*
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100865 * We are about to soft-enable interrupts (we are hard disabled
866 * at this point). We check if there's anything that needs to
867 * be replayed first.
868 */
869 lbz r0,PACAIRQHAPPENED(r13)
870 cmpwi cr0,r0,0
871 bne- restore_check_irq_replay
872
873 /*
874 * Get here when nothing happened while soft-disabled, just
875 * soft-enable and move-on. We will hard-enable as a side
876 * effect of rfi
877 */
878restore_no_replay:
879 TRACE_ENABLE_INTS
880 li r0,1
881 stb r0,PACASOFTIRQEN(r13);
882
883 /*
884 * Final return path. BookE is handled in a different file
885 */
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000886do_restore:
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000887#ifdef CONFIG_PPC_BOOK3E
Anton Blanchardb1576fe2014-02-04 16:04:35 +1100888 b exception_return_book3e
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000889#else
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100890 /*
891 * Clear the reservation. If we know the CPU tracks the address of
892 * the reservation then we can potentially save some cycles and use
893 * a larx. On POWER6 and POWER7 this is significantly faster.
894 */
895BEGIN_FTR_SECTION
896 stdcx. r0,0,r1 /* to clear the reservation */
897FTR_SECTION_ELSE
898 ldarx r4,0,r1
899ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
900
901 /*
902 * Some code path such as load_up_fpu or altivec return directly
903 * here. They run entirely hard disabled and do not alter the
904 * interrupt state. They also don't use lwarx/stwcx. and thus
905 * are known not to leave dangling reservations.
906 */
907 .globl fast_exception_return
908fast_exception_return:
909 ld r3,_MSR(r1)
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100910 ld r4,_CTR(r1)
911 ld r0,_LINK(r1)
912 mtctr r4
913 mtlr r0
914 ld r4,_XER(r1)
915 mtspr SPRN_XER,r4
916
917 REST_8GPRS(5, r1)
918
919 andi. r0,r3,MSR_RI
920 beq- unrecov_restore
921
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100922 /* Load PPR from thread struct before we clear MSR:RI */
923BEGIN_FTR_SECTION
924 ld r2,PACACURRENT(r13)
925 ld r2,TASKTHREADPPR(r2)
926END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
927
Anton Blanchardf89451f2010-08-11 01:40:27 +0000928 /*
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100929 * Clear RI before restoring r13. If we are returning to
930 * userspace and we take an exception after restoring r13,
931 * we end up corrupting the userspace r13 value.
932 */
Nicholas Piggin49d09bf2016-09-15 19:04:46 +1000933 li r4,0
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100934 mtmsrd r4,1
Paul Mackerras9994a332005-10-10 22:36:14 +1000935
Michael Neulingafc07702013-02-13 16:21:34 +0000936#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
937 /* TM debug */
938 std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
939#endif
Paul Mackerras9994a332005-10-10 22:36:14 +1000940 /*
941 * r13 is our per cpu area, only restore it if we are returning to
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100942 * userspace the value stored in the stack frame may belong to
943 * another CPU.
Paul Mackerras9994a332005-10-10 22:36:14 +1000944 */
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100945 andi. r0,r3,MSR_PR
Paul Mackerras9994a332005-10-10 22:36:14 +1000946 beq 1f
Benjamin Herrenschmidt0c4888e2013-11-05 16:33:22 +1100947BEGIN_FTR_SECTION
948 mtspr SPRN_PPR,r2 /* Restore PPR */
949END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Christophe Leroyc223c902016-05-17 08:33:46 +0200950 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
Paul Mackerras9994a332005-10-10 22:36:14 +1000951 REST_GPR(13, r1)
Nicholas Piggin9d914322018-01-10 03:07:15 +1100952
Paul Mackerrase56a6e22007-02-07 13:13:26 +1100953 mtspr SPRN_SRR1,r3
Paul Mackerras9994a332005-10-10 22:36:14 +1000954
955 ld r2,_CCR(r1)
956 mtcrf 0xFF,r2
957 ld r2,_NIP(r1)
958 mtspr SPRN_SRR0,r2
959
960 ld r0,GPR0(r1)
961 ld r2,GPR2(r1)
962 ld r3,GPR3(r1)
963 ld r4,GPR4(r1)
964 ld r1,GPR1(r1)
Nicholas Piggin9d914322018-01-10 03:07:15 +1100965 RFI_TO_USER
966 b . /* prevent speculative execution */
Paul Mackerras9994a332005-10-10 22:36:14 +1000967
Nicholas Piggin9d914322018-01-10 03:07:15 +11009681: mtspr SPRN_SRR1,r3
969
970 ld r2,_CCR(r1)
971 mtcrf 0xFF,r2
972 ld r2,_NIP(r1)
973 mtspr SPRN_SRR0,r2
974
975 ld r0,GPR0(r1)
976 ld r2,GPR2(r1)
977 ld r3,GPR3(r1)
978 ld r4,GPR4(r1)
979 ld r1,GPR1(r1)
980 RFI_TO_KERNEL
Paul Mackerras9994a332005-10-10 22:36:14 +1000981 b . /* prevent speculative execution */
982
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +0000983#endif /* CONFIG_PPC_BOOK3E */
984
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +1100985 /*
Benjamin Herrenschmidt7c0482e2012-05-10 16:12:38 +0000986 * We are returning to a context with interrupts soft disabled.
987 *
988 * However, we may also about to hard enable, so we need to
989 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
990 * or that bit can get out of sync and bad things will happen
991 */
992restore_irq_off:
993 ld r3,_MSR(r1)
994 lbz r7,PACAIRQHAPPENED(r13)
995 andi. r0,r3,MSR_EE
996 beq 1f
997 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
998 stb r7,PACAIRQHAPPENED(r13)
9991: li r0,0
1000 stb r0,PACASOFTIRQEN(r13);
1001 TRACE_DISABLE_INTS
1002 b do_restore
1003
	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay	/* r3 = vector to replay, or 0 */
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60		/* keep low nibble (frame-type flags) */
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500		/* external interrupt */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60		/* hypervisor maintenance interrupt */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900		/* decrementer */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280		/* BookE doorbell */
#else
	BEGIN_FTR_SECTION
	cmpwi	cr0,r3,0xe80		/* HV doorbell */
	FTR_SECTION_ELSE
	cmpwi	cr0,r3,0xa00		/* guest doorbell */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */
Benjamin Herrenschmidt7230c562012-03-06 18:27:59 +11001066
Paul Mackerras9994a332005-10-10 22:36:14 +10001067unrecov_restore:
1068 addi r3,r1,STACK_FRAME_OVERHEAD
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001069 bl unrecoverable_exception
Paul Mackerras9994a332005-10-10 22:36:14 +10001070 b unrecov_restore
1071
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0			/* trap if soft-enabled */
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts (clear MSR[EE] via rotate/mask) */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	/* r0 = current MSR minus EE/SE/BE/RI (the state to run RTAS with
	 * before the final switch) */
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	/* r6 = RTAS entry MSR: additionally clear SF (64-bit), IR/DR
	 * (translation), FP and LE — 32-bit big-endian real mode */
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL			/* jump into RTAS with MSR = r6 */
	b	.			/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point, so all addresses are realmode */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4		/* get PC into LR for PC-relative load */
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0		/* clear RI while SRR0/1 are live */
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL			/* back to 64-bit translated mode */
	b	.			/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs	/* effective address used above */

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */
1214
Paul Mackerras9994a332005-10-10 22:36:14 +10001215_GLOBAL(enter_prom)
1216 mflr r0
1217 std r0,16(r1)
1218 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
1219
1220 /* Because PROM is running in 32b mode, it clobbers the high order half
1221 * of all registers that it saves. We therefore save those registers
1222 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
1223 */
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001224 SAVE_GPR(2, r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001225 SAVE_GPR(13, r1)
1226 SAVE_8GPRS(14, r1)
1227 SAVE_10GPRS(22, r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001228 mfcr r10
Paul Mackerras9994a332005-10-10 22:36:14 +10001229 mfmsr r11
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001230 std r10,_CCR(r1)
Paul Mackerras9994a332005-10-10 22:36:14 +10001231 std r11,_MSR(r1)
1232
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001233 /* Put PROM address in SRR0 */
1234 mtsrr0 r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001235
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001236 /* Setup our trampoline return addr in LR */
1237 bcl 20,31,$+4
12380: mflr r4
1239 addi r4,r4,(1f - 0b)
1240 mtlr r4
1241
1242 /* Prepare a 32-bit mode big endian MSR
Paul Mackerras9994a332005-10-10 22:36:14 +10001243 */
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001244#ifdef CONFIG_PPC_BOOK3E
1245 rlwinm r11,r11,0,1,31
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001246 mtsrr1 r11
1247 rfi
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001248#else /* CONFIG_PPC_BOOK3E */
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +10001249 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1250 andc r11,r11,r12
1251 mtsrr1 r11
Nicholas Pigginefe8bc02018-02-22 23:35:44 +11001252 RFI_TO_KERNEL
Benjamin Herrenschmidt2d27cfd2009-07-23 23:15:59 +00001253#endif /* CONFIG_PPC_BOOK3E */
Paul Mackerras9994a332005-10-10 22:36:14 +10001254
Benjamin Herrenschmidt5c0484e2013-09-23 12:04:45 +100012551: /* Return from OF */
1256 FIXUP_ENDIAN
Paul Mackerras9994a332005-10-10 22:36:14 +10001257
1258 /* Just make sure that r1 top 32 bits didn't get
1259 * corrupt by OF
1260 */
1261 rldicl r1,r1,0,32
1262
1263 /* Restore the MSR (back to 64 bits) */
1264 ld r0,_MSR(r1)
Benjamin Herrenschmidt6c171992009-07-23 23:15:07 +00001265 MTMSRD(r0)
Paul Mackerras9994a332005-10-10 22:36:14 +10001266 isync
1267
1268 /* Restore other registers */
1269 REST_GPR(2, r1)
1270 REST_GPR(13, r1)
1271 REST_8GPRS(14, r1)
1272 REST_10GPRS(22, r1)
1273 ld r4,_CCR(r1)
1274 mtcr r4
Paul Mackerras9994a332005-10-10 22:36:14 +10001275
1276 addi r1,r1,PROM_FRAME_SIZE
1277 ld r0,16(r1)
1278 mtlr r0
1279 blr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001280
Steven Rostedt606576c2008-10-06 19:06:12 -04001281#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt4e491d12008-05-14 23:49:44 -04001282#ifdef CONFIG_DYNAMIC_FTRACE
1283_GLOBAL(mcount)
1284_GLOBAL(_mcount)
Al Viro9445aa12016-01-13 23:33:46 -05001285EXPORT_SYMBOL(_mcount)
Torsten Duwe15308662016-03-03 15:26:59 +11001286 mflr r12
1287 mtctr r12
1288 mtlr r0
1289 bctr
Steven Rostedt4e491d12008-05-14 23:49:44 -04001290
Torsten Duwe15308662016-03-03 15:26:59 +11001291#ifndef CC_USING_MPROFILE_KERNEL
Anton Blanchard5e666842014-04-04 09:06:33 +11001292_GLOBAL_TOC(ftrace_caller)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001293 /* Taken from output of objdump from lib64/glibc */
1294 mflr r3
1295 ld r11, 0(r1)
1296 stdu r1, -112(r1)
1297 std r3, 128(r1)
1298 ld r4, 16(r11)
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301299 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001300.globl ftrace_call
1301ftrace_call:
1302 bl ftrace_stub
1303 nop
Steven Rostedt46542882009-02-10 22:19:54 -08001304#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1305.globl ftrace_graph_call
1306ftrace_graph_call:
1307 b ftrace_graph_stub
1308_GLOBAL(ftrace_graph_stub)
1309#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001310 ld r0, 128(r1)
1311 mtlr r0
1312 addi r1, r1, 112
Torsten Duwe15308662016-03-03 15:26:59 +11001313
#else /* CC_USING_MPROFILE_KERNEL */
/*
 *
 * ftrace_caller() is the function that replaces _mcount() when ftrace is
 * active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter r1 points to A's stack frame, B has not yet
 * had a chance to allocate one yet.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
_GLOBAL(ftrace_caller)
	/* Save the original return address in A's stack frame */
	std	r0,LRSAVE(r1)

	/* Create our stack frame + pt_regs */
	stdu	r1,-SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_10GPRS(2, r1)
	SAVE_10GPRS(12, r1)
	SAVE_10GPRS(22, r1)

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE
	std	r8, GPR1(r1)

	/* Load special regs for save below */
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	mfcr	r11

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip & pt_regs->link */
	std	r7, _NIP(r1)
	std	r7, _LINK(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2,PACATOC(r13)	/* get kernel TOC in r2 */

	/* r5 = function_trace_op, third argument to the tracer */
	addis	r3,r2,function_trace_op@toc@ha
	addi	r3,r3,function_trace_op@toc@l
	ld	r5,0(r3)

#ifdef CONFIG_LIVEPATCH
	mr	r14,r7		/* remember old NIP */
#endif
	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	std	r8, _MSR(r1)
	std	r9, _CTR(r1)
	std	r10, _XER(r1)
	std	r11, _CCR(r1)

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1 ,STACK_FRAME_OVERHEAD

	/* ftrace_call(r3, r4, r5, r6) — call target patched at runtime */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop

	/* Load ctr with the possibly modified NIP */
	ld	r3, _NIP(r1)
	mtctr	r3
#ifdef CONFIG_LIVEPATCH
	cmpd	r14,r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	REST_GPR(0,r1)
	REST_10GPRS(2,r1)
	REST_10GPRS(12,r1)
	REST_10GPRS(22,r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE

	/* Restore original LR for return to B */
	ld	r0, LRSAVE(r1)
	mtlr	r0

#ifdef CONFIG_LIVEPATCH
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	stdu	r1, -112(r1)
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub	/* patched to ftrace_graph_caller */
_GLOBAL(ftrace_graph_stub)
	addi	r1, r1, 112
#endif

	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
	mtlr	r0
	bctr			/* jump after _mcount site */
#endif /* CC_USING_MPROFILE_KERNEL */
1436
Steven Rostedt4e491d12008-05-14 23:49:44 -04001437_GLOBAL(ftrace_stub)
1438 blr
Michael Ellerman85baa092016-03-24 22:04:05 +11001439
#ifdef CONFIG_LIVEPATCH
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A, calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 * - we have no stack frame and can not allocate one
	 * - LR points back to the original caller (in A)
	 * - CTR holds the new NIP in C
	 * - r0 & r12 are free
	 *
	 * r0 can't be used as the base register for a DS-form load or store, so
	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
	 */
livepatch_handler:
	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	/* Allocate 3 x 8 bytes on the per-thread livepatch stack */
	ld	r1, TI_livepatch_sp(r12)
	addi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2, -24(r1)
	mflr	r12
	std	r12, -16(r1)

	/* Store stack end marker (overflow/corruption canary) */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r1)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl				/* call the patched function C */

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r0 and r12, and we can use r2 until we
	 * restore it.
	 */

	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	ld	r1, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2, STACK_END_MAGIC@h
	ori	r2, r2, STACK_END_MAGIC@l
	ld	r12, -8(r1)
1:	tdne	r12, r2			/* trap (BUG) if canary mismatch */
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r1)
	mtlr	r12
	ld	r2, -24(r1)

	/* Pop livepatch stack frame */
	CURRENT_THREAD_INFO(r12, r0)
	subi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Return to original caller of live patched function */
	blr
#endif
1522
1523
Steven Rostedt4e491d12008-05-14 23:49:44 -04001524#else
Anton Blanchard5e666842014-04-04 09:06:33 +11001525_GLOBAL_TOC(_mcount)
Al Viro9445aa12016-01-13 23:33:46 -05001526EXPORT_SYMBOL(_mcount)
Steven Rostedt4e491d12008-05-14 23:49:44 -04001527 /* Taken from output of objdump from lib64/glibc */
1528 mflr r3
1529 ld r11, 0(r1)
1530 stdu r1, -112(r1)
1531 std r3, 128(r1)
1532 ld r4, 16(r11)
1533
Abhishek Sagar395a59d2008-06-21 23:47:27 +05301534 subi r3, r3, MCOUNT_INSN_SIZE
Steven Rostedt4e491d12008-05-14 23:49:44 -04001535 LOAD_REG_ADDR(r5,ftrace_trace_function)
1536 ld r5,0(r5)
1537 ld r5,0(r5)
1538 mtctr r5
1539 bctrl
Steven Rostedt4e491d12008-05-14 23:49:44 -04001540 nop
Steven Rostedt6794c782009-02-09 21:10:27 -08001541
1542
1543#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1544 b ftrace_graph_caller
1545#endif
Steven Rostedt4e491d12008-05-14 23:49:44 -04001546 ld r0, 128(r1)
1547 mtlr r0
1548 addi r1, r1, 112
1549_GLOBAL(ftrace_stub)
1550 blr
1551
Steven Rostedt6794c782009-02-09 21:10:27 -08001552#endif /* CONFIG_DYNAMIC_FTRACE */
1553
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef CC_USING_MPROFILE_KERNEL
/*
 * Classic graph-tracer entry: divert the caller's return address so the
 * return side of the function is traced too. Entered from the _mcount /
 * ftrace_caller frame (112 bytes) allocated above.
 */
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address (the traced function's ip) */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	ld	r11, 112(r1)
	ld	r3, 16(r11)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the callers stack frame to this.
	 */
	ld	r11, 112(r1)
	std	r3, 16(r11)

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr
1579
Torsten Duwe15308662016-03-03 15:26:59 +11001580#else /* CC_USING_MPROFILE_KERNEL */
/*
 * ftrace_graph_caller (-mprofile-kernel variant)
 *
 * Reached from ftrace_caller while its 112-byte frame is live.  Per the
 * comments left by ftrace_caller: CTR holds the traced function's local
 * address and LR has already been restored to the real parent.  Because
 * -mprofile-kernel enters the profiling stub before the function body
 * runs, the argument registers r3-r10 are still live and must be
 * preserved around the C call to prepare_ftrace_return.
 *
 * On exit, LR = diversion address returned by prepare_ftrace_return,
 * CTR = traced function address, and we bctr back into the function
 * with all argument registers and the callee's TOC restored.
 */
1581_GLOBAL(ftrace_graph_caller)
1582 /* with -mprofile-kernel, parameter regs are still alive at _mcount */
1583 std r10, 104(r1)
1584 std r9, 96(r1)
1585 std r8, 88(r1)
1586 std r7, 80(r1)
1587 std r6, 72(r1)
1588 std r5, 64(r1)
1589 std r4, 56(r1)
1590 std r3, 48(r1)
1591
1592 /* Save callee's TOC in the ABI compliant location */
1593 std r2, 24(r1)
1594 ld r2, PACATOC(r13) /* get kernel TOC in r2 */
1595
1596 mfctr r4 /* ftrace_caller has moved local addr here */
1597 std r4, 40(r1)
1598 mflr r3 /* ftrace_caller has restored LR from stack */
	/* r4 = address of the profiling call site itself */
1599 subi r4, r4, MCOUNT_INSN_SIZE
1600
	/* prepare_ftrace_return(parent_lr /* r3 *<!>/, self_ip /* r4 *<!>/) */
1601 bl prepare_ftrace_return
1602 nop
1603
1604 /*
1605 * prepare_ftrace_return gives us the address we divert to.
1606 * Change the LR to this.
1607 */
1608 mtlr r3
1609
	/* reload the traced function's address into CTR for the bctr below */
1610 ld r0, 40(r1)
1611 mtctr r0
1612 ld r10, 104(r1)
1613 ld r9, 96(r1)
1614 ld r8, 88(r1)
1615 ld r7, 80(r1)
1616 ld r6, 72(r1)
1617 ld r5, 64(r1)
1618 ld r4, 56(r1)
1619 ld r3, 48(r1)
1620
1621 /* Restore callee's TOC */
1622 ld r2, 24(r1)
1623
	/*
	 * Pop the frame, then re-save the (diverted) LR in the caller's
	 * regular LRSAVE slot before branching back via CTR.
	 */
1624 addi r1, r1, 112
1625 mflr r0
1626 std r0, LRSAVE(r1)
1627 bctr
1628#endif /* CC_USING_MPROFILE_KERNEL */
1629
/*
 * return_to_handler
 *
 * The address the function-graph tracer diverts traced functions'
 * returns to.  Entered as if it were the traced function's caller, so
 * r3/r4 hold the function's return values and must be preserved across
 * the C call.  Saves live state below r1 before stdu creates the frame
 * (those negative offsets become positive offsets of the new frame),
 * switches to the kernel TOC, and asks ftrace_return_to_handler for the
 * real return address, then restores everything and returns there.
 */
Steven Rostedt6794c782009-02-09 21:10:27 -08001630_GLOBAL(return_to_handler)
1631 /* need to save return values */
1632 std r4, -32(r1)
1633 std r3, -24(r1)
1634 /* save TOC */
1635 std r2, -16(r1)
1636 std r31, -8(r1)
	/* r31 = original r1, so the saves above stay addressable */
1637 mr r31, r1
1638 stdu r1, -112(r1)
1639
Steven Rostedtbb725342009-02-11 12:45:49 -08001640 /*
Anton Blanchard7d56c652014-09-17 17:07:03 +10001641 * We might be called from a module.
Steven Rostedtbb725342009-02-11 12:45:49 -08001642 * Switch to our TOC to run inside the core kernel.
1643 */
Steven Rostedtbe10ab12009-09-15 08:30:14 -07001644 ld r2, PACATOC(r13)
Steven Rostedt6794c782009-02-09 21:10:27 -08001645
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001646 bl ftrace_return_to_handler
Steven Rostedt6794c782009-02-09 21:10:27 -08001647 nop
1648
1649 /* return value has real return address */
1650 mtlr r3
1651
	/* pop our frame via the back chain, then recover the saved state */
1652 ld r1, 0(r1)
1653 ld r4, -32(r1)
1654 ld r3, -24(r1)
1655 ld r2, -16(r1)
1656 ld r31, -8(r1)
1657
1658 /* Jump back to real return address */
1659 blr
1660#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1661#endif /* CONFIG_FUNCTION_TRACER */