/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

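/*
 * Clear a single pte: if it pointed at a present page, flush the cache
 * and TLB, drop the page's rmap and reference, and update the rss
 * accounting; if it held a swap entry, release the swap slot and any
 * swap-cache page. Nonlinear file ptes are simply cleared.
 */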
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, file_rss);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

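/*
 * Walk the range one page at a time, installing a file pte per page.
 * On failure the ptes installed so far are left in place; the caller
 * returns the error to userspace.
 */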
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
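/*
 * Illustrative userspace sketch (not part of this file): the typical
 * pattern is to mmap() a shared window once, then shuffle which file
 * pages back which virtual pages without creating new vmas. The file
 * name and sizes below are made up for the example.
 *
 *	int fd = open("/tmp/data", O_RDWR);
 *	char *win = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// Make the first page of the window show file page 3 instead
 *	// of file page 0 (pgoff is in units of pages, prot must be 0):
 *	remap_file_pages(win, 4096, 0, 3, 0);
 */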
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff, 1);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (unlikely(has_write_lock)) {
			downgrade_write(&mm->mmap_sem);
			has_write_lock = 0;
		}
		make_pages_present(start, start + size);
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}