/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/writeback.h>
#include <linux/file.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

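/*
 * Walk the ptes mapping [addr, end) under one pmd, transfer any dirty
 * state from the ptes to the corresponding struct pages with
 * set_page_dirty(), and return the number of pages newly marked dirty.
 * The "progress" counter bounds how long the page table lock is held
 * before checking whether a reschedule or lock break is needed.
 */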
static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;
	unsigned long ret = 0;

again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;

		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			ret += set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return ret;
}

static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		ret += msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;
	unsigned long ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret += msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
	return ret;
}

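/*
 * Top-level page table walk for one vma: propagate pte dirty bits down
 * through the pgd/pud/pmd levels and return the total number of pages
 * newly marked dirty in [addr, end).
 */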
static unsigned long msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long ret = 0;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret += msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	return ret;
}

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67). Instead, it just
 * marks the relevant pages dirty. The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
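/*
 * As an illustration of the above, a userspace caller with a MAP_SHARED
 * mapping "map" of length "len" backed by file descriptor "fd" (the names
 * are only for this example) might pair MS_ASYNC with fsync():
 *
 *	msync(map, len, MS_ASYNC);	mark the relevant pages dirty, no I/O
 *	fsync(fd);			write them out, wait, check the result
 *
 * or start asynchronous writeout immediately instead:
 *
 *	msync(map, len, MS_ASYNC);
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 */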
static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
			unsigned long end, int flags,
			unsigned long *nr_pages_dirtied)
{
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED))
		*nr_pages_dirtied = msync_page_range(vma, addr, end);
	return 0;
}

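/*
 * The system call itself: validate the arguments, then walk the vmas
 * covering [start, start+len).  Unmapped gaps are skipped but remembered
 * so that -ENOMEM can be returned at the end.  For MS_ASYNC the count of
 * dirtied pages feeds balance_dirty_pages_ratelimited_nr() to throttle
 * the caller; for MS_SYNC each shared file-backed vma is written out via
 * filemap_fdatawrite(), the file's ->fsync() and filemap_fdatawait(),
 * with mmap_sem dropped around the I/O and the vma looked up again
 * afterwards.
 */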
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;
	int done = 0;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	down_read(&current->mm->mmap_sem);
	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	do {
		unsigned long nr_pages_dirtied = 0;
		struct file *file;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags,
							&nr_pages_dirtied);
				if (error)
					goto out_unlock;
			}
			error = unmapped_error;
			done = 1;
		} else {
			/* Here vma->vm_start <= start < vma->vm_end < end. */
			error = msync_interval(vma, start, vma->vm_end, flags,
						&nr_pages_dirtied);
			if (error)
				goto out_unlock;
		}
		file = vma->vm_file;
		start = vma->vm_end;
		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			balance_dirty_pages_ratelimited_nr(file->f_mapping,
							nr_pages_dirtied);
			fput(file);
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, start);
		} else if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			struct address_space *mapping;
			int err;

			get_file(file);
			up_read(&current->mm->mmap_sem);
			mapping = file->f_mapping;
			error = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				mutex_lock(&mapping->host->i_mutex);
				err = file->f_op->fsync(file,file->f_dentry,1);
				mutex_unlock(&mapping->host->i_mutex);
				if (err && !error)
					error = err;
			}
			err = filemap_fdatawait(mapping);
			if (err && !error)
				error = err;
			fput(file);
			down_read(&current->mm->mmap_sem);
			if (error)
				goto out_unlock;
			vma = find_vma(current->mm, start);
		} else {
			vma = vma->vm_next;
		}
	} while (!done);
out_unlock:
	current->flags &= ~PF_SYNCWRITE;
	up_read(&current->mm->mmap_sem);
out:
	return error;
}