/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
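
/*
 * Alignment mask applied to shared mappings so that every user view of
 * a page lands on the same cache colour.  PAGE_SIZE - 1 imposes no
 * extra constraint ("sane caches"); on CPUs with aliasing data caches
 * the MIPS cache setup code is expected to widen this at boot to the
 * alias span.
 */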
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
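
/*
 * Use the legacy bottom-up mmap layout if the personality asks for it,
 * if the stack is unlimited (no safe place for a top-down mmap base
 * below an unbounded stack), or if the legacy_va_layout sysctl is set.
 */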
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
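
/*
 * Top-down layout: place the mmap base below the stack, leaving a gap
 * for stack growth clamped to [MIN_GAP, MAX_GAP], and pull it down
 * further by the ASLR offset 'rnd'.
 */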
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
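
/*
 * Round 'addr' up to a shm_align_mask boundary, then add the colour of
 * the file offset, so the resulting address is congruent with
 * (pgoff << PAGE_SHIFT) modulo the cache alias span.
 */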
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};
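
/*
 * Common worker for both search directions.  MAP_FIXED requests are
 * only validated (range and cache-colour checks), a hint address is
 * tried as given, and otherwise vm_unmapped_area() performs the real
 * search.  A failed top-down search falls back to bottom-up below.
 */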
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}
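
/*
 * Page-aligned random offset used for mmap base randomization.  The
 * entropy width comes from mmap_rnd_bits, or mmap_rnd_compat_bits for
 * 32-bit tasks on a COMPAT kernel.
 */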
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
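
/*
 * Called at exec time to pick this mm's layout: legacy bottom-up from
 * TASK_UNMAPPED_BASE, or top-down from just below the stack gap.  The
 * random factor is applied only when the task has PF_RANDOMIZE set.
 */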
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
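
/*
 * Random offset for the heap start.  Shifting before masking keeps the
 * low PAGE_SHIFT bits clear, so the offset is page aligned and capped
 * at the sizes noted in the comment below.
 */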
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
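
/*
 * Randomize the heap base above the current brk.  If the addition
 * wraps around, fall back to the unrandomized brk.
 */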
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
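
/*
 * Report whether a kernel virtual address is backed by a valid page
 * frame, i.e. whether virt_to_phys() on it yields a pfn that exists.
 */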
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);