Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame^] | 1 | /* |
| 2 | * linux/arch/arm/mm/mmap.c |
| 3 | */ |
| 4 | #include <linux/config.h> |
| 5 | #include <linux/fs.h> |
| 6 | #include <linux/mm.h> |
| 7 | #include <linux/mman.h> |
| 8 | #include <linux/shm.h> |
| 9 | |
| 10 | #include <asm/system.h> |
| 11 | |
| 12 | #define COLOUR_ALIGN(addr,pgoff) \ |
| 13 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ |
| 14 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) |
| 15 | |
| 16 | /* |
| 17 | * We need to ensure that shared mappings are correctly aligned to |
| 18 | * avoid aliasing issues with VIPT caches. We need to ensure that |
| 19 | * a specific page of an object is always mapped at a multiple of |
| 20 | * SHMLBA bytes. |
| 21 | * |
| 22 | * We unconditionally provide this function for all cases, however |
| 23 | * in the VIVT case, we optimise out the alignment rules. |
| 24 | */ |
| 25 | unsigned long |
| 26 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
| 27 | unsigned long len, unsigned long pgoff, unsigned long flags) |
| 28 | { |
| 29 | struct mm_struct *mm = current->mm; |
| 30 | struct vm_area_struct *vma; |
| 31 | unsigned long start_addr; |
| 32 | #ifdef CONFIG_CPU_V6 |
| 33 | unsigned int cache_type; |
| 34 | int do_align = 0, aliasing = 0; |
| 35 | |
| 36 | /* |
| 37 | * We only need to do colour alignment if either the I or D |
| 38 | * caches alias. This is indicated by bits 9 and 21 of the |
| 39 | * cache type register. |
| 40 | */ |
| 41 | cache_type = read_cpuid(CPUID_CACHETYPE); |
| 42 | if (cache_type != read_cpuid(CPUID_ID)) { |
| 43 | aliasing = (cache_type | cache_type >> 12) & (1 << 11); |
| 44 | if (aliasing) |
| 45 | do_align = filp || flags & MAP_SHARED; |
| 46 | } |
| 47 | #else |
| 48 | #define do_align 0 |
| 49 | #define aliasing 0 |
| 50 | #endif |
| 51 | |
| 52 | /* |
| 53 | * We should enforce the MAP_FIXED case. However, currently |
| 54 | * the generic kernel code doesn't allow us to handle this. |
| 55 | */ |
| 56 | if (flags & MAP_FIXED) { |
| 57 | if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1)) |
| 58 | return -EINVAL; |
| 59 | return addr; |
| 60 | } |
| 61 | |
| 62 | if (len > TASK_SIZE) |
| 63 | return -ENOMEM; |
| 64 | |
| 65 | if (addr) { |
| 66 | if (do_align) |
| 67 | addr = COLOUR_ALIGN(addr, pgoff); |
| 68 | else |
| 69 | addr = PAGE_ALIGN(addr); |
| 70 | |
| 71 | vma = find_vma(mm, addr); |
| 72 | if (TASK_SIZE - len >= addr && |
| 73 | (!vma || addr + len <= vma->vm_start)) |
| 74 | return addr; |
| 75 | } |
| 76 | start_addr = addr = mm->free_area_cache; |
| 77 | |
| 78 | full_search: |
| 79 | if (do_align) |
| 80 | addr = COLOUR_ALIGN(addr, pgoff); |
| 81 | else |
| 82 | addr = PAGE_ALIGN(addr); |
| 83 | |
| 84 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { |
| 85 | /* At this point: (!vma || addr < vma->vm_end). */ |
| 86 | if (TASK_SIZE - len < addr) { |
| 87 | /* |
| 88 | * Start a new search - just in case we missed |
| 89 | * some holes. |
| 90 | */ |
| 91 | if (start_addr != TASK_UNMAPPED_BASE) { |
| 92 | start_addr = addr = TASK_UNMAPPED_BASE; |
| 93 | goto full_search; |
| 94 | } |
| 95 | return -ENOMEM; |
| 96 | } |
| 97 | if (!vma || addr + len <= vma->vm_start) { |
| 98 | /* |
| 99 | * Remember the place where we stopped the search: |
| 100 | */ |
| 101 | mm->free_area_cache = addr + len; |
| 102 | return addr; |
| 103 | } |
| 104 | addr = vma->vm_end; |
| 105 | if (do_align) |
| 106 | addr = COLOUR_ALIGN(addr, pgoff); |
| 107 | } |
| 108 | } |
| 109 | |