/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

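/*
 * COLOUR_ALIGN() rounds a candidate address up to an SHMLBA boundary and
 * then adds the "colour" of the requested file page, so every mapping of
 * that page lands at the same offset within an SHMLBA window.
 *
 * Illustrative values only (assuming 4 KiB pages and a 16 KiB SHMLBA):
 * COLOUR_ALIGN(0x5000, 3) = 0x8000 + ((3 << 12) & 0x3fff) = 0xb000, which
 * carries the same colour (0x3000) as page 3 of the object.
 */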
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

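/*
 * The legacy (bottom-up) layout is used when the task asks for it
 * (ADDR_COMPAT_LAYOUT, e.g. via setarch(8)), when the stack rlimit is
 * unlimited (no sane gap below the stack can be reserved), or when the
 * vm.legacy_va_layout sysctl is set.
 */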
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

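/*
 * Place mmap_base below the stack by the clamped stack rlimit.  A sketch
 * with assumed numbers: given a 3 GiB TASK_SIZE and an 8 MiB stack rlimit,
 * the gap is clamped up to MIN_GAP (128 MiB), so mmap_base sits roughly
 * 128 MiB below TASK_SIZE, lowered further by the ASLR offset 'rnd'.
 */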
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at a multiple of SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, but in
 * the VIVT case we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We honour MAP_FIXED here, but for shared mappings on aliasing
	 * caches we reject fixed addresses that would break the colour
	 * rules.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

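	/*
	 * Note on the alignment fields below: PAGE_MASK & (SHMLBA - 1)
	 * keeps only the page-colour bits (0x3000 with 4 KiB pages and a
	 * 16 KiB SHMLBA), and align_offset supplies the colour the file
	 * page must keep, so vm_unmapped_area() may return any address
	 * whose colour matches pgoff's.
	 */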
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

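/*
 * Top-down variant: identical colour handling, but the search runs
 * from mmap_base downwards, with a bottom-up retry if that window is
 * exhausted (see the fallback below).
 */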
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  vm_unmapped_area() returns a page-aligned
	 * address on success, so any set offset bits signal an error
	 * value such as -ENOMEM.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

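/*
 * Select the address-space layout at exec time: legacy tasks get the
 * bottom-up allocator starting at TASK_UNMAPPED_BASE, everyone else
 * gets the stack-relative top-down allocator.  Both bases are shifted
 * by up to 255 pages of ASLR when randomisation is enabled.
 */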
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	/* 0x00100000 page frames of 4 KiB each == 4 GiB */
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid.  The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions.  This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif