blob: 89eff9e7b0b53267f433bdd89ad1fa2b495de002 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/arch/s390/mm/mmap.c
3 *
4 * flexible mmap layout support
5 *
6 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
7 * All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 *
24 * Started by Ingo Molnar <mingo@elte.hu>
25 */
26
27#include <linux/personality.h>
28#include <linux/mm.h>
29#include <linux/module.h>
Martin Schwidefsky6252d702008-02-09 18:24:37 +010030#include <asm/pgalloc.h>
Heiko Carstens77575912009-06-12 10:26:25 +020031#include <asm/compat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
Heiko Carstens9046e402011-01-12 09:55:22 +010033static unsigned long stack_maxrandom_size(void)
34{
35 if (!(current->flags & PF_RANDOMIZE))
36 return 0;
37 if (current->personality & ADDR_NO_RANDOMIZE)
38 return 0;
39 return STACK_RND_MASK << PAGE_SHIFT;
40}
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/*
43 * Top of mmap area (just below the process stack).
44 *
Heiko Carstens9e78a132011-01-12 09:55:23 +010045 * Leave at least a ~32 MB hole.
Linus Torvalds1da177e2005-04-16 15:20:36 -070046 */
Heiko Carstens9e78a132011-01-12 09:55:23 +010047#define MIN_GAP (32*1024*1024)
Martin Schwidefskyf481bfa2009-03-18 13:27:36 +010048#define MAX_GAP (STACK_TOP/6*5)
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50static inline unsigned long mmap_base(void)
51{
Jiri Slabya58c26b2010-01-13 20:44:33 +010052 unsigned long gap = rlimit(RLIMIT_STACK);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
54 if (gap < MIN_GAP)
55 gap = MIN_GAP;
56 else if (gap > MAX_GAP)
57 gap = MAX_GAP;
58
Heiko Carstens9046e402011-01-12 09:55:22 +010059 return STACK_TOP - stack_maxrandom_size() - (gap & PAGE_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060}
61
62static inline int mmap_is_legacy(void)
63{
Martin Schwidefsky347a8dc2006-01-06 00:19:28 -080064#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -070065 /*
66 * Force standard allocation for 64 bit programs.
67 */
Heiko Carstens77575912009-06-12 10:26:25 +020068 if (!is_compat_task())
Linus Torvalds1da177e2005-04-16 15:20:36 -070069 return 1;
70#endif
71 return sysctl_legacy_va_layout ||
72 (current->personality & ADDR_COMPAT_LAYOUT) ||
Jiri Slabya58c26b2010-01-13 20:44:33 +010073 rlimit(RLIMIT_STACK) == RLIM_INFINITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -070074}
75
Martin Schwidefsky6252d702008-02-09 18:24:37 +010076#ifndef CONFIG_64BIT
77
Linus Torvalds1da177e2005-04-16 15:20:36 -070078/*
79 * This function, called very early during the creation of a new
80 * process VM image, sets up which VM layout function to use:
81 */
82void arch_pick_mmap_layout(struct mm_struct *mm)
83{
84 /*
85 * Fall back to the standard layout if the personality
86 * bit is set, or if the expected stack growth is unlimited:
87 */
88 if (mmap_is_legacy()) {
89 mm->mmap_base = TASK_UNMAPPED_BASE;
90 mm->get_unmapped_area = arch_get_unmapped_area;
91 mm->unmap_area = arch_unmap_area;
92 } else {
93 mm->mmap_base = mmap_base();
94 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
95 mm->unmap_area = arch_unmap_area_topdown;
96 }
97}
98EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
99
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100100#else
101
/*
 * Hook called on the mmap path: for a 64 bit (non-compat) task whose
 * current address space limit is still below 1UL << 53, upgrade the
 * page table to the next level when the requested length reaches
 * TASK_SIZE, so the mapping can be placed in the enlarged space.
 *
 * Returns 0 when no upgrade is needed, otherwise the result of
 * crst_table_upgrade() (0 on success, negative error code on failure).
 *
 * NOTE(review): only @len is compared against TASK_SIZE here — @addr is
 * not consulted, so addr+len overflowing TASK_SIZE with a small len is
 * presumably handled elsewhere; confirm against the callers.
 */
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}
109
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100110static unsigned long
111s390_get_unmapped_area(struct file *filp, unsigned long addr,
112 unsigned long len, unsigned long pgoff, unsigned long flags)
113{
114 struct mm_struct *mm = current->mm;
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100115 unsigned long area;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100116 int rc;
117
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100118 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
119 if (!(area & ~PAGE_MASK))
120 return area;
Heiko Carstens77575912009-06-12 10:26:25 +0200121 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100122 /* Upgrade the page table to 4 levels and retry. */
123 rc = crst_table_upgrade(mm, 1UL << 53);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100124 if (rc)
125 return (unsigned long) rc;
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100126 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100127 }
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100128 return area;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100129}
130
131static unsigned long
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100132s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100133 const unsigned long len, const unsigned long pgoff,
134 const unsigned long flags)
135{
136 struct mm_struct *mm = current->mm;
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100137 unsigned long area;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100138 int rc;
139
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100140 area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
141 if (!(area & ~PAGE_MASK))
142 return area;
Heiko Carstens77575912009-06-12 10:26:25 +0200143 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100144 /* Upgrade the page table to 4 levels and retry. */
145 rc = crst_table_upgrade(mm, 1UL << 53);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100146 if (rc)
147 return (unsigned long) rc;
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100148 area = arch_get_unmapped_area_topdown(filp, addr, len,
149 pgoff, flags);
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100150 }
Martin Schwidefsky0fb1d9b2009-03-18 13:27:37 +0100151 return area;
Martin Schwidefsky6252d702008-02-09 18:24:37 +0100152}
153/*
154 * This function, called very early during the creation of a new
155 * process VM image, sets up which VM layout function to use:
156 */
157void arch_pick_mmap_layout(struct mm_struct *mm)
158{
159 /*
160 * Fall back to the standard layout if the personality
161 * bit is set, or if the expected stack growth is unlimited:
162 */
163 if (mmap_is_legacy()) {
164 mm->mmap_base = TASK_UNMAPPED_BASE;
165 mm->get_unmapped_area = s390_get_unmapped_area;
166 mm->unmap_area = arch_unmap_area;
167 } else {
168 mm->mmap_base = mmap_base();
169 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
170 mm->unmap_area = arch_unmap_area_topdown;
171 }
172}
173EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
174
175#endif