/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

#ifdef CONFIG_MMU
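/*
 * Mask used to colour-align shared mappings. The PAGE_SIZE - 1 default
 * suits caches that cannot alias; on parts with aliasing D-caches the
 * CPU setup code is expected to widen this to span one cache way.
 */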
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

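/*
 * As above, but rounding down: pick the highest correctly-coloured
 * address at or below addr.
 */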
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	/* Step back one full colour period so the colour still matches. */
	return base + off - (shm_align_mask + 1);
}

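/*
 * Bottom-up allocator: walk upward from the cached free-area hint (or
 * TASK_UNMAPPED_BASE) until a hole of at least len bytes is found,
 * colour-aligning each candidate when the mapping could alias.
 */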
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

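	/*
	 * File-backed pages are shared through the page cache and
	 * MAP_SHARED anonymous mappings have multiple users, so both
	 * must be colour-aligned.
	 */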
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

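	/*
	 * Resume from the cached hint, unless the largest hole seen
	 * below it could hold this request, in which case rescan from
	 * TASK_UNMAPPED_BASE so that hole is not missed.
	 */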
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

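/*
 * Top-down variant: start just below mm->mmap_base and walk downward,
 * trying to slot the mapping immediately below each existing VMA, with
 * the same colour constraints as the bottom-up case. Falls back to the
 * bottom-up allocator if the region below mmap_base is exhausted.
 */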
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
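	/*
	 * The mapping will end at addr: colour-align its start,
	 * addr - len, and recompute the end from that.
	 */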
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base - len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));
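	/* The loop exits once vma->vm_start - len would wrap below zero. */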

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

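/*
 * Unlike read()/write() on /dev/mem, mmap() is not restricted to the
 * range backed by system memory: any physical address may be mapped.
 */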
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}