blob: 0a66fa1db63a2890b2761d77f269a29450ffaab5 [file] [log] [blame]
Catalin Marinasb3901d52012-03-05 11:49:28 +00001/*
2 * Based on arch/arm/kernel/process.c
3 *
4 * Original Copyright (C) 1995 Linus Torvalds
5 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
6 * Copyright (C) 2012 ARM Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <stdarg.h>
22
AKASHI Takahirofd92d4a2014-04-30 10:51:32 +010023#include <linux/compat.h>
Ard Biesheuvel60c0d452015-03-06 15:49:24 +010024#include <linux/efi.h>
Catalin Marinasb3901d52012-03-05 11:49:28 +000025#include <linux/export.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/unistd.h>
31#include <linux/user.h>
32#include <linux/delay.h>
33#include <linux/reboot.h>
34#include <linux/interrupt.h>
35#include <linux/kallsyms.h>
36#include <linux/init.h>
37#include <linux/cpu.h>
38#include <linux/elfcore.h>
39#include <linux/pm.h>
40#include <linux/tick.h>
41#include <linux/utsname.h>
42#include <linux/uaccess.h>
43#include <linux/random.h>
44#include <linux/hw_breakpoint.h>
45#include <linux/personality.h>
46#include <linux/notifier.h>
Jisheng Zhang096b3222015-09-16 22:23:21 +080047#include <trace/events/power.h>
Mark Rutlandb51386b2016-11-03 20:23:13 +000048#include <linux/percpu.h>
Catalin Marinasb3901d52012-03-05 11:49:28 +000049
James Morse57f49592016-02-05 14:58:48 +000050#include <asm/alternative.h>
Catalin Marinasb3901d52012-03-05 11:49:28 +000051#include <asm/compat.h>
52#include <asm/cacheflush.h>
James Morsed0854412016-10-18 11:27:48 +010053#include <asm/exec.h>
Will Deaconec45d1c2013-01-17 12:31:45 +000054#include <asm/fpsimd.h>
55#include <asm/mmu_context.h>
Catalin Marinasb3901d52012-03-05 11:49:28 +000056#include <asm/processor.h>
57#include <asm/stacktrace.h>
Catalin Marinasb3901d52012-03-05 11:49:28 +000058
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/* Stack canary value compared by -fstack-protector instrumented code. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
64
/*
 * Function pointers to optional machine specific functions
 */
/* Platform power-off hook; invoked by machine_power_off() if set. */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

/* Platform restart hook; preferred over do_kernel_restart() when set. */
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
Catalin Marinasb3901d52012-03-05 11:49:28 +000072
/*
 * This is our default idle handler: wait in a low-power state until an
 * interrupt arrives, emitting cpu_idle tracepoints around the wait.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	/* Interrupts are re-enabled only after the low-power wait returns. */
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
87
/* Broadcast IDLE_START on the idle notifier chain when entering idle. */
void arch_cpu_idle_enter(void)
{
	idle_notifier_call_chain(IDLE_START);
}
92
/* Broadcast IDLE_END on the idle notifier chain when leaving idle. */
void arch_cpu_idle_exit(void)
{
	idle_notifier_call_chain(IDLE_END);
}
97
#ifdef CONFIG_HOTPLUG_CPU
/* Final step of CPU hot-unplug: take the current CPU offline for good. */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif
104
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}
118
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	/* Nothing left to do on this CPU: spin with interrupts masked. */
	while (1);
}
130
/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	/* Hand over to the platform power-off hook, if one is registered. */
	if (pm_power_off)
		pm_power_off();
}
144
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 *
 * @cmd: optional command string forwarded to the restart handlers.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}
179
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700180/*
181 * dump a block of kernel memory from around the given address
182 */
183static void show_data(unsigned long addr, int nbytes, const char *name)
184{
185 int i, j;
186 int nlines;
187 u32 *p;
188
189 /*
190 * don't attempt to dump non-kernel addresses or
191 * values that are probably just small negative numbers
192 */
Runmin Wang52642102017-02-10 11:59:33 -0800193 if (addr < KIMAGE_VADDR || addr > -256UL)
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700194 return;
195
196 printk("\n%s: %#lx:\n", name, addr);
197
198 /*
199 * round address down to a 32 bit boundary
200 * and always dump a multiple of 32 bytes
201 */
202 p = (u32 *)(addr & ~(sizeof(u32) - 1));
203 nbytes += (addr & (sizeof(u32) - 1));
204 nlines = (nbytes + 31) / 32;
205
206
207 for (i = 0; i < nlines; i++) {
208 /*
209 * just display low 16 bits of address to keep
210 * each line of the dump < 80 characters
211 */
212 printk("%04lx ", (unsigned long)p & 0xffff);
213 for (j = 0; j < 8; j++) {
214 u32 data;
215 if (probe_kernel_address(p, data)) {
Ji Zhang13b40d32018-01-26 15:04:26 +0800216 pr_cont(" ********");
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700217 } else {
Trilok Soniea8a5ce2016-05-01 15:38:50 -0700218 pr_cont(" %08x", data);
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700219 }
220 ++p;
221 }
Trilok Soniea8a5ce2016-05-01 15:38:50 -0700222 pr_cont("\n");
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700223 }
224}
225
/*
 * Dump the memory surrounding PC, LR and SP from @regs.  The address
 * limit is raised to KERNEL_DS for the duration so that the dumps can
 * cover kernel addresses, and restored afterwards.
 */
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
	mm_segment_t fs;

	fs = get_fs();
	set_fs(KERNEL_DS);
	show_data(regs->pc - nbytes, nbytes * 2, "PC");
	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
	show_data(regs->sp - nbytes, nbytes * 2, "SP");
	/* Restore the caller's address limit. */
	set_fs(fs);
}
237
Catalin Marinasb3901d52012-03-05 11:49:28 +0000238void __show_regs(struct pt_regs *regs)
239{
Catalin Marinas6ca68e82013-09-17 18:49:46 +0100240 int i, top_reg;
241 u64 lr, sp;
242
243 if (compat_user_mode(regs)) {
244 lr = regs->compat_lr;
245 sp = regs->compat_sp;
246 top_reg = 12;
247 } else {
248 lr = regs->regs[30];
249 sp = regs->sp;
250 top_reg = 29;
251 }
Catalin Marinasb3901d52012-03-05 11:49:28 +0000252
Tejun Heoa43cb952013-04-30 15:27:17 -0700253 show_regs_print_info(KERN_DEFAULT);
Catalin Marinasb3901d52012-03-05 11:49:28 +0000254 print_symbol("PC is at %s\n", instruction_pointer(regs));
Catalin Marinas6ca68e82013-09-17 18:49:46 +0100255 print_symbol("LR is at %s\n", lr);
Catalin Marinasb3901d52012-03-05 11:49:28 +0000256 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
Catalin Marinas6ca68e82013-09-17 18:49:46 +0100257 regs->pc, lr, regs->pstate);
258 printk("sp : %016llx\n", sp);
Mark Rutlanddb4b0712016-10-20 12:23:16 +0100259
260 i = top_reg;
261
262 while (i >= 0) {
Catalin Marinasb3901d52012-03-05 11:49:28 +0000263 printk("x%-2d: %016llx ", i, regs->regs[i]);
Mark Rutlanddb4b0712016-10-20 12:23:16 +0100264 i--;
265
266 if (i % 2 == 0) {
267 pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
268 i--;
269 }
270
271 pr_cont("\n");
Catalin Marinasb3901d52012-03-05 11:49:28 +0000272 }
Greg Hackmannf1ec6c82014-09-09 17:36:05 -0700273 if (!user_mode(regs))
Trilok Soni0aa47212016-09-20 14:55:35 -0700274 show_extra_register_data(regs, 64);
Catalin Marinasb3901d52012-03-05 11:49:28 +0000275}
276
/* Print the full register dump for @regs. */
void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
}
281
/*
 * Reset the userspace TLS registers; for compat tasks also clear the
 * in-kernel shadow copy (thread.tp_value).
 */
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}
298
/*
 * Reset per-thread state for the current task (called when a task execs a
 * new image): FPSIMD state, TLS registers and ptrace HW breakpoints.
 */
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}
305
/* No extra arch-specific per-thread resources to release on arm64. */
void release_thread(struct task_struct *dead_task)
{
}
309
/*
 * Copy @src's task_struct into @dst.  The live FPSIMD register contents
 * are written back to the task_struct first so the copy is up to date;
 * kernel threads (no mm) have no user register state to preserve.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;
	return 0;
}
317
318asmlinkage void ret_from_fork(void) asm("ret_from_fork");
319
/*
 * Set up the saved register state for a newly created task.
 *
 * @clone_flags: clone(2) flags; CLONE_SETTLS is handled here.
 * @stack_start: new userspace stack pointer, or — for a kernel thread —
 *               the value stashed in x19 (the entry function; see the
 *               kernel-thread branch below).
 * @stk_sz:      value stashed in x20 for a kernel thread; unused for
 *               user threads.
 * @p:           the new task.
 */
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;	/* fork/clone returns 0 in the child */

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		/*
		 * Kernel thread: runs at EL1 with a zeroed pt_regs.  The
		 * entry point and its argument go into callee-saved
		 * x19/x20, where ret_from_fork (entry code) presumably
		 * picks them up — confirm against entry.S.
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	/* First schedule of p resumes at ret_from_fork on childregs. */
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
375
/* Switch the userspace TLS registers from current to @next. */
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr;

	/* Save the outgoing task's user-writable TLS register. */
	tpidr = read_sysreg(tpidr_el0);
	*task_user_tls(current) = tpidr;

	/*
	 * Compat tasks expose their TLS through the read-only tpidrro_el0;
	 * for native tasks it is zeroed.  NOTE(review): when the kernel is
	 * unmapped at EL0, tpidrro_el0 is deliberately left alone —
	 * presumably owned by the entry trampoline; confirm.
	 */
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}
390
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		/*
		 * PSTATE.UAO is set for tasks whose address limit is
		 * KERNEL_DS and cleared otherwise.  The ALTERNATIVE patches
		 * in the pstate write only on CPUs with ARM64_HAS_UAO;
		 * elsewhere it stays a nop.
		 */
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}
401
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

/* Publish @next as this CPU's entry task before switching to it. */
static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}
415
/*
 * Thread switching: migrate all per-thread CPU state (FPSIMD, TLS, HW
 * breakpoints, CONTEXTIDR, entry-task shadow, UAO) from @prev to @next,
 * then perform the low-level register switch.  Returns the task that was
 * running before the switch.
 */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}
442
/*
 * Return the "wait channel" of the sleeping task @p: the first PC found
 * on its stack that is not inside a scheduler function, or 0 if it
 * cannot be determined (running task, unwind failure, etc.).
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* Pin p's stack so it cannot be freed while we walk it. */
	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		/* Stop if the frame leaves the stack or unwinding fails. */
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count ++ < 16);	/* bound the walk to 16 frames */

out:
	put_task_stack(p);
	return ret;
}
476
477unsigned long arch_align_stack(unsigned long sp)
478{
479 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
480 sp -= get_random_int() & ~PAGE_MASK;
481 return sp & ~0xf;
482}
483
Catalin Marinasb3901d52012-03-05 11:49:28 +0000484unsigned long arch_randomize_brk(struct mm_struct *mm)
485{
Kees Cook61462c82016-05-10 10:55:49 -0700486 if (is_compat_task())
Jason Cooperfa5114c2016-10-11 13:54:02 -0700487 return randomize_page(mm->brk, 0x02000000);
Kees Cook61462c82016-05-10 10:55:49 -0700488 else
Jason Cooperfa5114c2016-10-11 13:54:02 -0700489 return randomize_page(mm->brk, 0x40000000);
Catalin Marinasb3901d52012-03-05 11:49:28 +0000490}