// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

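/*
 * Check whether a vma is eligible for khugepaged: honour VM_HUGEPAGE /
 * VM_NOHUGEPAGE and MMF_DISABLE_THP, require suitably aligned shmem
 * mappings when file THP is enabled, and otherwise accept only plain
 * anonymous vmas (no special mappings, no temporary stack).
 */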
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged does not yet work on non-shmem files or special
	 * mappings. And file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

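/*
 * Walk the HPAGE_PMD_NR ptes under the pte lock, lock each mapped page and
 * isolate it from the LRU so rmap and reclaim cannot touch it while the
 * collapse proceeds.  Returns 1 on success; on failure all pages isolated
 * so far are unlocked and put back via release_pte_pages().
 */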
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

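/*
 * Copy the content of the isolated small pages into the new huge page
 * (clearing the destination for none/zero ptes), clear the old ptes and
 * drop the old pages' rmap and swap cache references.
 */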
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_sem temporarily dropped, revalidate vma
 * before taking mmap_sem.
 * Return 0 if it succeeds, otherwise return a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

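/*
 * Collapse the pte-mapped range at @address into a freshly allocated huge
 * page: allocate and charge the page, swap in missing ptes, isolate and
 * copy the small pages under the appropriate locks, and finally install
 * the huge pmd.  Called with mmap_sem held for read; returns with it
 * released.
 */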
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

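/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count none/zero, swap and referenced ptes against the max_ptes_none /
 * max_ptes_swap limits, record the originating NUMA nodes, and call
 * collapse_huge_page() when the range qualifies.  Returns 1 if the
 * mmap_sem was released by collapse_huge_page().
 */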
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

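/*
 * Drop an mm_slot from the hash and scan list once its mm has exited,
 * releasing the slot and the mm reference taken in __khugepaged_enter().
 */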
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
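/*
 * After a shmem range has been collapsed, walk all vmas mapping @pgoff and
 * retract the (now empty) pte page tables, so that subsequent faults can map
 * the huge page with a pmd instead of pte-mapping it.
 */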
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and freeze a new huge page;
 *  - scan over radix tree, replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unfreeze huge page;
 *  - if replacing fails:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	new_page->index = start;
	new_page->mapping = mapping;
	__SetPageSwapBacked(new_page);
	__SetPageLocked(new_page);
	BUG_ON(!page_ref_freeze(new_page, 1));


	/*
	 * At this point the new_page is 'frozen' (page_count() is zero), locked
	 * and not up-to-date. It's safe to insert it into radix tree, because
	 * nobody would be able to map it or use it in other way until we
	 * unfreeze it.
	 */

	index = start;
	xa_lock_irq(&mapping->i_pages);
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Stop if extent has been hole-punched, and is now completely
		 * empty (the more obvious i_size_read() check would take an
		 * irq-unsafe seqlock on 32-bit).
		 */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}

		/*
		 * Handle holes in the radix tree: charge it from shmem and
		 * insert relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			break;
		}
		nr_none += n;
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->i_pages.xa_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			xa_unlock_irq(&mapping->i_pages);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
			xa_lock_irq(&mapping->i_pages);
		} else if (trylock_page(page)) {
			get_page(page);
		} else {
			result = SCAN_PAGE_LOCK;
			break;
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);
		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}
		xa_unlock_irq(&mapping->i_pages);

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_isolate_failed;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xa_lock_irq(&mapping->i_pages);

		slot = radix_tree_lookup_slot(&mapping->i_pages, index);
		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
					&mapping->i_pages.xa_lock), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			goto out_lru;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(&mapping->i_pages, slot,
				new_page + (index % HPAGE_PMD_NR));

		slot = radix_tree_iter_resume(slot, &iter);
		index++;
		continue;
out_lru:
		xa_unlock_irq(&mapping->i_pages);
		putback_lru_page(page);
out_isolate_failed:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
out_unlock:
		unlock_page(page);
		put_page(page);
		break;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (result == SCAN_SUCCEED && index < end) {
		int n = end - index;

		/* Stop if extent has been truncated, and is now empty */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}
		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}

		for (; index < end; index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

tree_locked:
	xa_unlock_irq(&mapping->i_pages);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		unsigned long flags;
		struct zone *zone = page_zone(new_page);

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			unlock_page(page);
			page_ref_unfreeze(page, 1);
			page->mapping = NULL;
			ClearPageActive(page);
			ClearPageUnevictable(page);
			put_page(page);
		}

		local_irq_save(flags);
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
		if (nr_none) {
			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
		}
		local_irq_restore(flags);

		/*
		 * Remove pte page tables, so we can re-fault
		 * the page as huge.
		 */
		retract_page_tables(mapping, start);

		/* Everything is ready, let's unfreeze the new_page */
		set_page_dirty(new_page);
		SetPageUptodate(new_page);
		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_anon(new_page);
		unlock_page(new_page);

		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		/* Something went wrong: rollback changes to the radix-tree */
		xa_lock_irq(&mapping->i_pages);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);

		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
			if (iter.index >= end)
				break;
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || iter.index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				radix_tree_delete(&mapping->i_pages, iter.index);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != iter.index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			radix_tree_replace_slot(&mapping->i_pages, slot, page);
			slot = radix_tree_iter_resume(slot, &iter);
			xa_unlock_irq(&mapping->i_pages);
			putback_lru_page(page);
			unlock_page(page);
			xa_lock_irq(&mapping->i_pages);
		}
		VM_BUG_ON(nr_none);
		xa_unlock_irq(&mapping->i_pages);

		/* Unfreeze new_page; the caller will take care of freeing it */
		page_ref_unfreeze(new_page, 1);
		mem_cgroup_cancel_charge(new_page, memcg, true);
		unlock_page(new_page);
		new_page->mapping = NULL;
	}
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}

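/*
 * Scan HPAGE_PMD_NR slots of a shmem mapping starting at @start and, if
 * enough pages are present and eligible, collapse them via collapse_shmem()
 * on the preferred NUMA node.
 */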
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct radix_tree_iter iter;
	void **slot;
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		if (iter.index >= start + HPAGE_PMD_NR)
			break;

		page = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exception(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) != 1 + page_mapcount(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_shmem(mm, mapping, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}
#endif

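/*
 * Main scan loop: walk the registered mm_slots and their vmas, scanning up
 * to @pages ptes per call via khugepaged_scan_pmd() or
 * khugepaged_scan_shmem(), and remember the cursor so the next call resumes
 * where this one stopped.  Called and returns with khugepaged_mm_lock held.
 */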
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001674static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1675 struct page **hpage)
1676 __releases(&khugepaged_mm_lock)
1677 __acquires(&khugepaged_mm_lock)
1678{
1679 struct mm_slot *mm_slot;
1680 struct mm_struct *mm;
1681 struct vm_area_struct *vma;
1682 int progress = 0;
1683
1684 VM_BUG_ON(!pages);
1685 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1686
1687 if (khugepaged_scan.mm_slot)
1688 mm_slot = khugepaged_scan.mm_slot;
1689 else {
1690 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1691 struct mm_slot, mm_node);
1692 khugepaged_scan.address = 0;
1693 khugepaged_scan.mm_slot = mm_slot;
1694 }
1695 spin_unlock(&khugepaged_mm_lock);
1696
1697 mm = mm_slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08001698 /*
1699	 * Don't wait for the mmap semaphore (to avoid long wait times); just
1700	 * move on to the next mm on the list.
1701 */
1702 vma = NULL;
1703 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1704 goto breakouterloop_mmap_sem;
1705 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001706 vma = find_vma(mm, khugepaged_scan.address);
1707
1708 progress++;
1709 for (; vma; vma = vma->vm_next) {
1710 unsigned long hstart, hend;
1711
1712 cond_resched();
1713 if (unlikely(khugepaged_test_exit(mm))) {
1714 progress++;
1715 break;
1716 }
Song Liu50f8b922018-08-17 15:47:00 -07001717 if (!hugepage_vma_check(vma, vma->vm_flags)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001718skip:
1719 progress++;
1720 continue;
1721 }
1722 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1723 hend = vma->vm_end & HPAGE_PMD_MASK;
1724 if (hstart >= hend)
1725 goto skip;
1726 if (khugepaged_scan.address > hend)
1727 goto skip;
1728 if (khugepaged_scan.address < hstart)
1729 khugepaged_scan.address = hstart;
1730 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1731
1732 while (khugepaged_scan.address < hend) {
1733 int ret;
1734 cond_resched();
1735 if (unlikely(khugepaged_test_exit(mm)))
1736 goto breakouterloop;
1737
1738 VM_BUG_ON(khugepaged_scan.address < hstart ||
1739 khugepaged_scan.address + HPAGE_PMD_SIZE >
1740 hend);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001741 if (shmem_file(vma->vm_file)) {
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001742 struct file *file;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001743 pgoff_t pgoff = linear_page_index(vma,
1744 khugepaged_scan.address);
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001745 if (!shmem_huge_enabled(vma))
1746 goto skip;
1747 file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001748 up_read(&mm->mmap_sem);
1749 ret = 1;
1750 khugepaged_scan_shmem(mm, file->f_mapping,
1751 pgoff, hpage);
1752 fput(file);
1753 } else {
1754 ret = khugepaged_scan_pmd(mm, vma,
1755 khugepaged_scan.address,
1756 hpage);
1757 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001758 /* move to next address */
1759 khugepaged_scan.address += HPAGE_PMD_SIZE;
1760 progress += HPAGE_PMD_NR;
1761 if (ret)
1762				/* we released mmap_sem, so break out of the loop */
1763 goto breakouterloop_mmap_sem;
1764 if (progress >= pages)
1765 goto breakouterloop;
1766 }
1767 }
1768breakouterloop:
1769 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1770breakouterloop_mmap_sem:
1771
1772 spin_lock(&khugepaged_mm_lock);
1773 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1774 /*
1775 * Release the current mm_slot if this mm is about to die, or
1776 * if we scanned all vmas of this mm.
1777 */
1778 if (khugepaged_test_exit(mm) || !vma) {
1779 /*
1780 * Make sure that if mm_users is reaching zero while
1781 * khugepaged runs here, khugepaged_exit will find
1782 * mm_slot not pointing to the exiting mm.
1783 */
1784 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1785 khugepaged_scan.mm_slot = list_entry(
1786 mm_slot->mm_node.next,
1787 struct mm_slot, mm_node);
1788 khugepaged_scan.address = 0;
1789 } else {
1790 khugepaged_scan.mm_slot = NULL;
1791 khugepaged_full_scans++;
1792 }
1793
1794 collect_mm_slot(mm_slot);
1795 }
1796
1797 return progress;
1798}
1799
1800static int khugepaged_has_work(void)
1801{
1802 return !list_empty(&khugepaged_scan.mm_head) &&
1803 khugepaged_enabled();
1804}
1805
1806static int khugepaged_wait_event(void)
1807{
1808 return !list_empty(&khugepaged_scan.mm_head) ||
1809 kthread_should_stop();
1810}
1811
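/*
 * One scan pass of the khugepaged thread.  khugepaged_pages_to_scan bounds
 * how much work is done per pass; the scanning itself happens in
 * khugepaged_scan_mm_slot() chunks under khugepaged_mm_lock, and the pass
 * also ends once the scan cursor has come back around to the head of the
 * mm list.  A preallocated hpage that was never consumed is released at
 * the end.
 *
 * The per-pass budget is tunable from userspace; on most configurations
 * the knob is (path may vary with how sysfs is mounted):
 *
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */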
1812static void khugepaged_do_scan(void)
1813{
1814 struct page *hpage = NULL;
1815 unsigned int progress = 0, pass_through_head = 0;
1816 unsigned int pages = khugepaged_pages_to_scan;
1817 bool wait = true;
1818
1819 barrier(); /* write khugepaged_pages_to_scan to local stack */
1820
1821 while (progress < pages) {
1822 if (!khugepaged_prealloc_page(&hpage, &wait))
1823 break;
1824
1825 cond_resched();
1826
1827 if (unlikely(kthread_should_stop() || try_to_freeze()))
1828 break;
1829
1830 spin_lock(&khugepaged_mm_lock);
1831 if (!khugepaged_scan.mm_slot)
1832 pass_through_head++;
1833 if (khugepaged_has_work() &&
1834 pass_through_head < 2)
1835 progress += khugepaged_scan_mm_slot(pages - progress,
1836 &hpage);
1837 else
1838 progress = pages;
1839 spin_unlock(&khugepaged_mm_lock);
1840 }
1841
1842 if (!IS_ERR_OR_NULL(hpage))
1843 put_page(hpage);
1844}
1845
1846static bool khugepaged_should_wakeup(void)
1847{
1848 return kthread_should_stop() ||
1849 time_after_eq(jiffies, khugepaged_sleep_expire);
1850}
1851
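/*
 * Sleep between scan passes.  With pending work the thread naps for
 * khugepaged_scan_sleep_millisecs (0 means scan passes run back to back);
 * otherwise it blocks until an mm is registered for scanning or the
 * thread is asked to stop.  Both waits are freezable so khugepaged does
 * not hold up suspend.  The sleep length is typically exposed as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs.
 */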
1852static void khugepaged_wait_work(void)
1853{
1854 if (khugepaged_has_work()) {
1855 const unsigned long scan_sleep_jiffies =
1856 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1857
1858 if (!scan_sleep_jiffies)
1859 return;
1860
1861 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1862 wait_event_freezable_timeout(khugepaged_wait,
1863 khugepaged_should_wakeup(),
1864 scan_sleep_jiffies);
1865 return;
1866 }
1867
1868 if (khugepaged_enabled())
1869 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1870}
1871
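/*
 * Main loop of the khugepaged kernel thread: alternate scan passes and
 * sleeps until kthread_stop(), then detach any mm_slot still held by the
 * scan cursor so a later restart begins from a clean state.  The thread
 * runs freezable and at minimum priority (MAX_NICE).
 */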
1872static int khugepaged(void *none)
1873{
1874 struct mm_slot *mm_slot;
1875
1876 set_freezable();
1877 set_user_nice(current, MAX_NICE);
1878
1879 while (!kthread_should_stop()) {
1880 khugepaged_do_scan();
1881 khugepaged_wait_work();
1882 }
1883
1884 spin_lock(&khugepaged_mm_lock);
1885 mm_slot = khugepaged_scan.mm_slot;
1886 khugepaged_scan.mm_slot = NULL;
1887 if (mm_slot)
1888 collect_mm_slot(mm_slot);
1889 spin_unlock(&khugepaged_mm_lock);
1890 return 0;
1891}
1892
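/*
 * Back-of-the-envelope example for the calculation below.  The numbers are
 * illustrative only; pageblock_nr_pages and MIGRATE_PCPTYPES depend on the
 * architecture and config (here: 4KiB pages, pageblock_nr_pages == 512,
 * MIGRATE_PCPTYPES == 3, two populated zones considered):
 *
 *	recommended_min  = 512 * 2 * 2		=  2048 pages
 *	recommended_min += 512 * 2 * 3 * 3	=  9216 pages
 *	total		 = 11264 pages		= 45056 kB (~44 MiB)
 *
 * capped at 5% of free lowmem (in pages) before the conversion to
 * kilobytes, and only applied if it is higher than the current
 * min_free_kbytes.
 */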
1893static void set_recommended_min_free_kbytes(void)
1894{
1895 struct zone *zone;
1896 int nr_zones = 0;
1897 unsigned long recommended_min;
1898
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07001899 for_each_populated_zone(zone) {
1900 /*
1901 * We don't need to worry about fragmentation of
1902 * ZONE_MOVABLE since it only has movable pages.
1903 */
1904 if (zone_idx(zone) > gfp_zone(GFP_USER))
1905 continue;
1906
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001907 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07001908 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001909
1910 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1911 recommended_min = pageblock_nr_pages * nr_zones * 2;
1912
1913 /*
1914 * Make sure that on average at least two pageblocks are almost free
1915 * of another type, one for a migratetype to fall back to and a
1916	 * second to avoid subsequent fallbacks of other types. There are 3
1917 * MIGRATE_TYPES we care about.
1918 */
1919 recommended_min += pageblock_nr_pages * nr_zones *
1920 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1921
1922	/* never allow reserving more than 5% of lowmem */
1923 recommended_min = min(recommended_min,
1924 (unsigned long) nr_free_buffer_pages() / 20);
1925 recommended_min <<= (PAGE_SHIFT-10);
1926
1927 if (recommended_min > min_free_kbytes) {
1928 if (user_min_free_kbytes >= 0)
1929 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1930 min_free_kbytes, recommended_min);
1931
1932 min_free_kbytes = recommended_min;
1933 }
1934 setup_per_zone_wmarks();
1935}
1936
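/*
 * Start or stop the background thread; typically invoked at init and
 * whenever the transparent_hugepage "enabled" policy changes.  A single
 * khugepaged thread is kept behind khugepaged_mutex; starting it wakes
 * the scanner if work is already queued and may raise min_free_kbytes
 * via set_recommended_min_free_kbytes() so compaction has headroom.
 *
 * From userspace the usual trigger is the sysfs policy file, e.g.
 * (path as typically configured):
 *
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never  > /sys/kernel/mm/transparent_hugepage/enabled
 */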
1937int start_stop_khugepaged(void)
1938{
1939 static struct task_struct *khugepaged_thread __read_mostly;
1940 static DEFINE_MUTEX(khugepaged_mutex);
1941 int err = 0;
1942
1943 mutex_lock(&khugepaged_mutex);
1944 if (khugepaged_enabled()) {
1945 if (!khugepaged_thread)
1946 khugepaged_thread = kthread_run(khugepaged, NULL,
1947 "khugepaged");
1948 if (IS_ERR(khugepaged_thread)) {
1949 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1950 err = PTR_ERR(khugepaged_thread);
1951 khugepaged_thread = NULL;
1952 goto fail;
1953 }
1954
1955 if (!list_empty(&khugepaged_scan.mm_head))
1956 wake_up_interruptible(&khugepaged_wait);
1957
1958 set_recommended_min_free_kbytes();
1959 } else if (khugepaged_thread) {
1960 kthread_stop(khugepaged_thread);
1961 khugepaged_thread = NULL;
1962 }
1963fail:
1964 mutex_unlock(&khugepaged_mutex);
1965 return err;
1966}