/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
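
/*
 * Worked example of COLOUR_ALIGN() (illustrative values only, assuming
 * SHMLBA == 0x4000 and PAGE_SHIFT == 12, as on ARMv6 aliasing caches):
 *
 *	COLOUR_ALIGN(0x40003000, 5)
 *	  = ((0x40003000 + 0x3fff) & ~0x3fff) + ((5 << 12) & 0x3fff)
 *	  = 0x40004000 + 0x1000
 *	  = 0x40005000
 *
 * The address is rounded up to an SHMLBA boundary, then offset so that
 * page 5 of the object keeps the same cache colour in every mapping.
 */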

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits (bits 11
	 * and 23) of the cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif
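
	/*
	 * Illustration of the aliasing test above, with a hypothetical
	 * cache type register value:
	 *
	 *	cache_type       = 0x00800000  (bit 23, the I-cache P bit, set)
	 *	cache_type >> 12 = 0x00000800  (bit 23 shifted down to bit 11)
	 *	(cache_type | cache_type >> 12) & (1 << 11) -> non-zero,
	 *	so the caches alias and colour alignment is required.
	 */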

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
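
	/*
	 * For example (illustrative values): a MAP_FIXED|MAP_SHARED
	 * request with addr = 0x40001000 and pgoff = 0 fails on an
	 * aliasing cache, since 0x40001000 & (SHMLBA - 1) != 0 with
	 * SHMLBA == 0x4000; the mapping would land on the wrong colour.
	 */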

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if (current->flags & PF_RANDOMIZE)
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
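	/*
	 * With PAGE_SHIFT == 12 the random offset above is at most
	 * 255 << 12 = 0xff000, i.e. the search base is shifted by up
	 * to just under 1MiB.
	 */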

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
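
/*
 * Userspace sketch (illustrative, not kernel code): on an aliasing
 * VIPT system, two MAP_SHARED mappings of the same file offset receive
 * SHMLBA-congruent addresses from the allocator above, so the shared
 * page occupies the same cache colour through both mappings:
 *
 *	void *a = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	assert(((unsigned long)a & (SHMLBA - 1)) ==
 *	       ((unsigned long)b & (SHMLBA - 1)));
 */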


/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
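
/*
 * Example (illustrative board values): with PHYS_OFFSET == 0x60000000
 * and 256MiB of RAM, the checks above accept only ranges that lie
 * entirely within [0x60000000, 0x70000000).
 */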

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
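
/*
 * Pfn 0x00100000 is the 4GiB boundary (0x00100000 << PAGE_SHIFT with
 * 4KiB pages), so e.g. pfn = 0xfffff with size = 2 pages gives
 * 0xfffff + 2 > 0x00100000 and the mapping is rejected.
 */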

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
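
/*
 * drivers/char/mem.c consults this hook for each page of a /dev/mem
 * access, so with CONFIG_STRICT_DEVMEM only non-RAM, non-exclusive
 * MMIO pages remain readable and writable through /dev/mem.
 */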

#endif