/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
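
/*
 * Illustrative sketch (userspace view, not part of this file's logic): when
 * can_do_mlock() fails, the locking syscalls below return -EPERM, e.g.
 *
 *	if (mlock(buf, length) != 0 && errno == EPERM)
 *		fprintf(stderr, "need CAP_IPC_LOCK or a nonzero RLIMIT_MEMLOCK\n");
 *
 * buf and length are hypothetical caller variables.
 */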

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page has already been moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *	   HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	unsigned int nr_pages;

	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		nr_pages = hpage_nr_pages(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		if (!isolate_lru_page(page))
			__munlock_isolated_page(page);
		else
			__munlock_isolation_failed(page);
	} else {
		nr_pages = hpage_nr_pages(page);
	}

	/*
	 * Regardless of the original PageMlocked flag, we determine nr_pages
	 * after touching the flag. This leaves a possible race with a THP page
	 * split, such that a whole THP page was munlocked, but nr_pages == 1.
	 * Returning a smaller mask due to that is OK, the worst that can
	 * happen is subsequent useless scanning of the former tail pages.
	 * The NR_MLOCK accounting can however become broken.
	 */
	return nr_pages - 1;
}
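
/*
 * Illustrative caller pattern (a sketch mirroring munlock_vma_pages_range()
 * below): the returned mask is turned into a page increment so the scan can
 * step over a THP head and its tail pages in one go:
 *
 *	page_mask = munlock_vma_page(page);
 *	page_increm = 1 + page_mask;	(1 for a normal page, HPAGE_PMD_NR for THP)
 *	start += page_increm * PAGE_SIZE;
 */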

/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: passed through to __get_user_pages(); when non-NULL, the
 *		 mmap_sem may be dropped while faulting, and *@nonblocking is
 *		 cleared to report that.
 *
 * This takes care of making the pages present too.
 *
 * Returns the number of pages faulted in on success, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
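
/*
 * Note (added for clarity): this mirrors the POSIX mlock() error contract as
 * consumed by __mm_populate() below. An unmapped address (-EFAULT from
 * get_user_pages()) is reported as -ENOMEM, while failure to allocate or lock
 * the pages (-ENOMEM) is reported as -EAGAIN, i.e. "some or all of the memory
 * could not be locked".
 */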

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving evictable page in unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON(PageLRU(page));
	VM_BUG_ON(!PageLocked(page));

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			struct lruvec *lruvec;
			int lru;

			if (PageLRU(page)) {
				lruvec = mem_cgroup_page_lruvec(page, zone);
				lru = page_lru(page);
				/*
				 * We already have pin from follow_page_mask()
				 * so we can spare the get_page() here.
				 */
				ClearPageLRU(page);
				del_page_from_lru_list(page, lruvec, lru);
			} else {
				__munlock_isolation_failed(page);
				goto skip_munlock;
			}

		} else {
skip_munlock:
			/*
			 * We won't be munlocking this page in the next phase
			 * but we still need to release the follow_page_mask()
			 * pin. We cannot do it under lru_lock however. If it's
			 * the last pin, __page_cache_release would deadlock.
			 */
			pagevec_add(&pvec_putback, pvec->pages[i]);
			pvec->pages[i] = NULL;
		}
	}
	delta_munlocked = -nr + pagevec_count(&pvec_putback);
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid, unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone
		 * does not match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= ~VM_LOCKED;

	while (start < end) {
		struct page *page = NULL;
		unsigned int page_mask;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
				&page_mask);

		if (page && !IS_ERR(page)) {
			if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to recompute
				 * the page_mask here.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into THPs.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		/* It's a bug to munlock in the middle of a THP page */
		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (on)
			newflags |= VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
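
/*
 * Illustrative sketch (userspace, not kernel code) of one path that ends up
 * here: an mmap() with MAP_POPULATE asks for the new range to be faulted in
 * up front via mm_populate()/__mm_populate():
 *
 *	void *p = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
 *
 * length is a hypothetical caller variable.
 */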

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = __mm_populate(start, len, 0);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;

	if (flags & MCL_FUTURE)
		current->mm->def_flags |= VM_LOCKED;
	else
		current->mm->def_flags &= ~VM_LOCKED;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (flags & MCL_CURRENT)
			newflags |= VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);
out:
	return ret;
}
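
/*
 * Illustrative sketch (userspace, not kernel code): a typical caller locks
 * everything currently mapped and everything mapped in the future, which
 * exercises both the do_mlockall() and mm_populate() paths above:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");
 */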

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
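
/*
 * Illustrative sketch (userspace, not kernel code): user_shm_lock() is the
 * accounting hook behind locking SysV shared memory segments, e.g.
 *
 *	if (shmctl(shmid, SHM_LOCK, NULL) != 0)
 *		perror("shmctl(SHM_LOCK)");
 *
 * shmid is a hypothetical segment id obtained from shmget().
 */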

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}