/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
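
/*
 * Worked example (illustrative values, not from the original source):
 * assuming 4KiB pages and SHMLBA == 4 * PAGE_SIZE == 16KiB,
 *
 *	COLOUR_ALIGN(0x8404, 3)
 *	  = ((0x8404 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	  = 0xc000 + 0x3000
 *	  = 0xf000
 *
 * i.e. the address is rounded up to a 16KiB boundary, then offset so
 * that page 3 of the object keeps its colour (its offset within the
 * 16KiB aliasing window).
 */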

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits of the cache
	 * type register: bit 11 (I-cache) and bit 23 (D-cache), the
	 * bits tested below.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
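	/*
	 * Illustrative walk-through (hypothetical CTR value, for
	 * exposition only): cache_type == 0x00800800 has both P bits
	 * set (bit 11 for the I-cache, bit 23 for the D-cache).  Then
	 *
	 *	cache_type >> 12              == 0x00000800
	 *	cache_type | cache_type >> 12 == 0x00800800
	 *	... & (1 << 11)               == 0x00000800  -> aliasing
	 *
	 * Either P bit alone is enough to force colour alignment.
	 */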
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case: the caller has already chosen
	 * the address, so the only thing we can check is that a shared
	 * mapping on an aliasing cache keeps the required colour.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
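	/*
	 * Illustrative: the base is slid up by 0..255 pages, e.g.
	 * get_random_int() % 256 == 0x5a adds 0x5a000, so the random
	 * offset always stays within a 1MiB (2^20 byte) window above
	 * the chosen search start.
	 */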

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
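
/*
 * Call-path sketch (for orientation; names as in kernels of this era,
 * not part of this file): a userspace mmap(NULL, len, prot, MAP_SHARED,
 * fd, off) reaches get_unmapped_area() in mm/mmap.c, which invokes this
 * function as the default arch hook to pick an address, and only then
 * is the VMA inserted.
 */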


/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
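
/*
 * Example (hypothetical platform, for exposition only): with RAM at
 * PHYS_OFFSET == 0x80000000 and 256MiB of lowmem, the valid window is
 * [0x80000000, 0x90000000); addr == 0x7ffff000 fails the first check,
 * and addr == 0x8ffff000 with size == 0x2000 fails the second because
 * it would run past the end of mapped RAM.
 */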

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
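
/*
 * The limit falls out of the page size: 0x00100000 page frames of 4KiB
 * each is 2^20 * 2^12 == 2^32 bytes, i.e. exactly the 4G barrier named
 * above, so any range ending past pfn 0x00100000 is refused.
 */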

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
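
/*
 * Concretely (illustrative): a pfn backed by system RAM returns 0
 * (blocked), a device MMIO pfn whose resource is not marked
 * IORESOURCE_EXCLUSIVE returns 1 (allowed), and an exclusive MMIO
 * region returns 0.
 */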

#endif