/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>

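/*
 * Minimum alignment for shared mappings.  PAGE_SIZE - 1 imposes no extra
 * constraint beyond normal page alignment; platforms whose virtually
 * indexed caches can alias raise this mask during cache setup so that
 * shared pages always land on the same cache colour.
 */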
unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

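/*
 * Round addr up to the next shm_align_mask boundary, then add the cache
 * colour implied by the file offset, so that the returned address and
 * pgoff agree in the bits covered by shm_align_mask.
 */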
#define COLOUR_ALIGN(addr, pgoff)                                \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +         \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        int do_color_align;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

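        /*
         * Colour-align anything that may end up shared through the page
         * cache: all file-backed mappings and shared anonymous mappings.
         * Private anonymous memory is never visible at two different
         * virtual addresses, so plain page alignment is enough for it.
         */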
        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
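        /*
         * Try the caller's hint first: if the (possibly colour-adjusted)
         * address fits below TASK_SIZE and does not collide with an
         * existing VMA, use it as-is.
         */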
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }
        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

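        /*
         * Otherwise walk the VMA list bottom-up from mmap_base and take
         * the first gap large enough for the request, keeping the
         * candidate address colour-aligned as we go.
         */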
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

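/*
 * With PF_RANDOMIZE set, shift mmap_base up by a page-aligned random
 * offset: up to 16MB for 32-bit tasks, up to 256MB for 64-bit tasks.
 */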
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}

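/* Page-aligned random offset applied to the brk; see arch_randomize_brk(). */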
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

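/*
 * Move the start of the heap up by a random, page-aligned amount.  If
 * the addition wraps around the end of the address space, fall back to
 * the unrandomized brk.
 */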
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}