/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

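/*
 * Walk the existing page tables for @addr without allocating anything.
 * Returns the pmd entry covering @addr, or NULL if any level of the
 * pgd/pud/pmd hierarchy is missing or bad.
 */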
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}

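/*
 * Counterpart of get_old_pmd() for the destination: allocate any
 * missing pud/pmd levels for @addr, and make sure a pte page is
 * instantiated below the pmd.  Returns NULL on allocation failure.
 */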
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
		return NULL;

	return pmd;
}

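/*
 * Move the ptes covering [old_addr, old_end) from old_pmd over to
 * new_pmd.  Both pmds must already exist; the caller holds mmap_sem
 * exclusively, which is what makes taking both pte locks below safe.
 */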
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock vmtruncate out,
		 * since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		if (new_vma->vm_truncate_count &&
		    new_vma->vm_truncate_count != vma->vm_truncate_count)
			new_vma->vm_truncate_count = 0;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
 	new_pte = pte_offset_map_nested(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap_nested(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}

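/*
 * Cap on how much address space a single move_ptes() call may shift,
 * so that the pte locks get dropped and cond_resched() runs at least
 * once per LATENCY_LIMIT bytes moved.
 */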
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

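/*
 * Move the page tables for [old_addr, old_addr + len) over to new_addr,
 * one pmd-sized (and LATENCY_LIMIT-capped) extent at a time.  Returns
 * how many bytes were actually moved; this is less than len only if
 * allocating a destination pmd failed partway through.
 */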
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}

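/*
 * Relocate the range [old_addr, old_addr + old_len) of vma to a new
 * mapping of new_len bytes at new_addr: copy the vma, move the page
 * tables across, then unmap the old range, fixing up VM_ACCOUNT and
 * the locked/total_vm statistics along the way.  Returns new_addr on
 * success, or a negative errno.
 */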
static unsigned long move_vma(struct vm_area_struct *vma,
	unsigned long old_addr, unsigned long old_len,
	unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
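/*
 * Illustrative userspace usage (not part of this file; assumes a
 * 4 KB page size):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
 *		       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *
 * grows the mapping to two pages, relocating it if it cannot be
 * expanded in place; without MREMAP_MAYMOVE such a grow fails with
 * -ENOMEM when the adjacent address space is already occupied.
 */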
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;

		ret = do_munmap(mm, new_addr, new_len);
		if (ret)
			goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
		old_len = new_len;
	}

	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (is_vm_hugetlb_page(vma)) {
		ret = -EINVAL;
		goto out;
	}
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto out;
	}
	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (vma->vm_flags & VM_ACCOUNT) {
		charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto out_nc;
	}

	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			vma_adjust(vma, vma->vm_start,
				addr + new_len, vma->vm_pgoff, NULL);

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_MAYSHARE)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
						vma->vm_pgoff, map_flags);
			if (new_addr & ~PAGE_MASK) {
				ret = new_addr;
				goto out;
			}

			ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
			if (ret)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
out_nc:
	return ret;
}

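/*
 * mremap(2) entry point: all the real work is done by do_mremap()
 * with mm->mmap_sem held for writing.
 */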
asmlinkage long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}