/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

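/*
 * Tear down whatever is currently mapped at @addr: a present page is
 * flushed and unmapped (propagating dirty state and undoing rmap/RSS
 * accounting), a swap entry has its swap slot and swapcache reference
 * dropped, and a file pte is simply cleared.
 */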
static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		flush_cache_page(vma, addr, pfn);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)) {
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page);
				page_cache_release(page);
				dec_mm_counter(mm, rss);
			}
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
}

/*
 * Install a file page at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	inc_mm_counter(mm, rss);
	flush_icache_page(vma, page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);

	err = 0;
err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
EXPORT_SYMBOL(install_page);
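
/*
 * Callers: a vm_ops->populate() implementation (e.g. filemap_populate()
 * in mm/filemap.c) typically walks the range one page at a time,
 * looking each page up in the page cache and handing it to
 * install_page(). A minimal sketch, with the page-cache lookup,
 * read-in and error handling elided:
 *
 *	for (; size; addr += PAGE_SIZE, ++pgoff, size -= PAGE_SIZE) {
 *		page = find_get_page(mapping, pgoff); // may need read-in
 *		err = install_page(mm, vma, addr, page, prot);
 *		if (err) {
 *			page_cache_release(page);
 *			return err;
 *		}
 *	}
 */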

/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
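
/*
 * Note that pgoff_to_pte() does not map a physical page: it encodes
 * the file offset directly in the (not-present) pte with the
 * architecture's "file" bit set, which is why the offset must fit in
 * PTE_FILE_MAX_BITS. On a later fault the offset is recovered with
 * pte_to_pgoff() and the page is brought (back) in via ->populate.
 */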
/**
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * Start with mmap_sem held for reading; it is upgraded to
	 * down_write() below only if vma->vm_flags must be changed.
	 */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data ||
			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
			end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
		    !(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
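
/*
 * Userspace usage, an illustrative sketch (error checking omitted):
 * map a shared file window, then point its second page at page 0 of
 * the file, creating a nonlinear mapping without any new vma. Note
 * that pgoff is in units of pages and prot must currently be 0:
 *
 *	fd = open("data", O_RDWR);
 *	addr = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	// window page 1 -> file page 0
 *	remap_file_pages(addr + page_size, page_size, 0, 0, 0);
 */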