/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

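/*
 * Randomization range (in pages) for the mmap base of 64-bit tasks and
 * cache-color alignment (in pages) for file-backed and shared mappings.
 * Both are selected at boot by setup_mmap_rnd() based on the machine type.
 */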
unsigned long mmap_rnd_mask;
static unsigned long mmap_align_mask;

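/*
 * Maximum number of bytes the stack top may be moved by address space
 * randomization; 0 if randomization is disabled for this task.
 */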
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

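/*
 * Random offset added to the mmap base: up to ~8 MB for 31-bit (compat)
 * tasks, up to mmap_rnd_mask pages for 64-bit tasks.
 */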
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

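/* mmap base for the legacy (bottom-up) layout */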
static unsigned long mmap_base_legacy(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

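/*
 * mmap base for the top-down layout: below the stack, leaving room for
 * the stack rlimit (clamped to [MIN_GAP, MAX_GAP]) and for stack and
 * mmap randomization.
 */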
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

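/*
 * Bottom-up search for a free area between mm->mmap_base and TASK_SIZE.
 * File-backed and shared mappings of 64-bit tasks are color-aligned via
 * mmap_align_mask.
 */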
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

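/*
 * Top-down variant: search below mm->mmap_base, falling back to a
 * bottom-up search if no fit is found there.
 */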
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

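/*
 * Randomized load base for ET_DYN (position-independent) executables:
 * two thirds of the way up the address space, aligned down to 4 GB for
 * 64-bit tasks, plus the usual mmap randomization.
 */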
unsigned long randomize_et_dyn(void)
{
	unsigned long base;

	base = STACK_TOP / 3 * 2;
	if (!is_32bit_task())
		/* Align to 4GB */
		base &= ~((1UL << 32) - 1);
	return base + mmap_rnd();
}

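/*
 * Check at mmap time whether the request fits below the current TASK_SIZE
 * limit; if a 64-bit task asks for more, upgrade the page table to four
 * levels (extending the limit to 1UL << 53) before the mapping is set up.
 */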
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

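/*
 * Wrappers around the generic area search: if it fails with -ENOMEM and
 * the 64-bit task still runs below the 1UL << 53 limit, upgrade the page
 * table to four levels and retry.
 */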
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

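/*
 * Select the mmap randomization range and cache-color alignment by machine
 * type: the models listed explicitly keep the old ~8 MB range with no extra
 * alignment; z13 (0x2964) and newer or unknown models randomize in 512 KB
 * steps over roughly 1 GB and align mappings to 512 KB.
 */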
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);