// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse into a hugepage if at least one pte is mapped,
 * just as would have happened if the vma had been large enough at
 * page-fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * free memory available in the system as it runs. Increasing
 * max_ptes_none instead potentially reduces the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged,
		 * if it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
}

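/*
 * Decide whether khugepaged may collapse pages in this vma: THP must be
 * enabled for it (globally "always" or via VM_HUGEPAGE), it must not be
 * marked VM_NOHUGEPAGE or belong to an mm with MMF_DISABLE_THP, and it
 * must be suitably aligned shmem or ordinary anonymous memory.
 */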
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged does not yet work on non-shmem files or special
	 * mappings. And file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

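/*
 * Per-scan histogram of how many scanned ptes point to pages on each
 * NUMA node; cleared at the start of every scan, then used to pick the
 * allocation node and to abort scans that span distant nodes.
 */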
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_sem was temporarily dropped, revalidate the vma
 * before taking mmap_sem again.
 * Returns 0 if it succeeds, otherwise returns a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
		.vma_flags = vma->vm_flags,
		.vma_page_prot = vma->vm_page_prot,
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to the pagetables, with the exception of
	 * gup_fast (handled later by the ptep_clear_flush) and accesses
	 * from the VM (handled by the anon_vma lock + PG_lock).
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	vm_write_begin(vma);
	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		vm_write_end(vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);
	vm_write_end(vma);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node with the
		 * max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
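/*
 * Walk the VMAs mapping this file offset and, where it is safe to do so,
 * tear down the pte-level page table covering the collapsed range, so a
 * later fault can map the new huge page with a huge pmd.
 */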
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&mm->mmap_sem)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			up_write(&mm->mmap_sem);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan over the radix tree, replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + unlock and free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	index = start;
	xa_lock_irq(&mapping->i_pages);
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Stop if extent has been hole-punched, and is now completely
		 * empty (the more obvious i_size_read() check would take an
		 * irq-unsafe seqlock on 32-bit).
		 */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}

		/*
		 * Handle holes in the radix tree: charge them to shmem and
		 * insert the relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->i_pages.xa_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			xa_unlock_irq(&mapping->i_pages);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
		} else if (trylock_page(page)) {
			get_page(page);
			xa_unlock_irq(&mapping->i_pages);
		} else {
			result = SCAN_PAGE_LOCK;
			goto tree_locked;
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xa_lock_irq(&mapping->i_pages);

		slot = radix_tree_lookup_slot(&mapping->i_pages, index);
		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
					&mapping->i_pages.xa_lock), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xa_unlock_irq(&mapping->i_pages);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(&mapping->i_pages, slot,
				new_page + (index % HPAGE_PMD_NR));

		slot = radix_tree_iter_resume(slot, &iter);
		index++;
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (index < end) {
		int n = end - index;

		/* Stop if extent has been truncated, and is now empty */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}
		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < end; index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

	__inc_node_page_state(new_page, NR_SHMEM_THPS);
	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
	}

tree_locked:
	xa_unlock_irq(&mapping->i_pages);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		/*
		 * Replacing old pages with the new one has succeeded; now we
		 * need to copy the content over and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		set_page_dirty(new_page);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_anon(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		/* Something went wrong: roll back changes to the radix-tree */
		xa_lock_irq(&mapping->i_pages);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);

		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
			if (iter.index >= end)
				break;
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || iter.index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				radix_tree_delete(&mapping->i_pages, iter.index);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != iter.index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			radix_tree_replace_slot(&mapping->i_pages, slot, page);
			slot = radix_tree_iter_resume(slot, &iter);
			xa_unlock_irq(&mapping->i_pages);
			unlock_page(page);
			putback_lru_page(page);
			xa_lock_irq(&mapping->i_pages);
		}
		VM_BUG_ON(nr_none);
		xa_unlock_irq(&mapping->i_pages);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}
1607
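/*
 * khugepaged_scan_shmem() - decide whether a shmem range is worth collapsing.
 *
 * Walks HPAGE_PMD_NR page-cache slots of @mapping starting at @start under
 * RCU, counting present pages and swapped-out entries.  The scan is aborted
 * if it meets a compound page, a page off the LRU, a page with unexpected
 * extra references, too many swap entries, or a page on a node that
 * khugepaged_scan_abort() rejects.  If enough pages are present (no more
 * than khugepaged_max_ptes_none missing), a target node is chosen and
 * collapse_shmem() is called to do the actual work.
 */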
1608static void khugepaged_scan_shmem(struct mm_struct *mm,
1609 struct address_space *mapping,
1610 pgoff_t start, struct page **hpage)
1611{
1612 struct page *page = NULL;
1613 struct radix_tree_iter iter;
1614 void **slot;
1615 int present, swap;
1616 int node = NUMA_NO_NODE;
1617 int result = SCAN_SUCCEED;
1618
1619 present = 0;
1620 swap = 0;
1621 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1622 rcu_read_lock();
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07001623 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001624 if (iter.index >= start + HPAGE_PMD_NR)
1625 break;
1626
1627 page = radix_tree_deref_slot(slot);
1628 if (radix_tree_deref_retry(page)) {
1629 slot = radix_tree_iter_retry(&iter);
1630 continue;
1631 }
1632
1633 if (radix_tree_exception(page)) {
1634 if (++swap > khugepaged_max_ptes_swap) {
1635 result = SCAN_EXCEED_SWAP_PTE;
1636 break;
1637 }
1638 continue;
1639 }
1640
1641 if (PageTransCompound(page)) {
1642 result = SCAN_PAGE_COMPOUND;
1643 break;
1644 }
1645
1646 node = page_to_nid(page);
1647 if (khugepaged_scan_abort(node)) {
1648 result = SCAN_SCAN_ABORT;
1649 break;
1650 }
1651 khugepaged_node_load[node]++;
1652
1653 if (!PageLRU(page)) {
1654 result = SCAN_PAGE_LRU;
1655 break;
1656 }
1657
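		/*
		 * Expect one reference from the page cache plus one per PTE
		 * mapping; anything beyond that (e.g. a GUP pin or concurrent
		 * isolation) would make the collapse fail later, so give up now.
		 */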
1658 if (page_count(page) != 1 + page_mapcount(page)) {
1659 result = SCAN_PAGE_COUNT;
1660 break;
1661 }
1662
1663 /*
1664		 * We should probably check whether the page is referenced here,
1665		 * but nobody will have transferred pte_young() to PageReferenced()
1666		 * for us, and an rmap walk here is just too costly...
1667 */
1668
1669 present++;
1670
1671 if (need_resched()) {
Matthew Wilcox148deab2016-12-14 15:08:49 -08001672 slot = radix_tree_iter_resume(slot, &iter);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001673 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001674 }
1675 }
1676 rcu_read_unlock();
1677
1678 if (result == SCAN_SUCCEED) {
1679 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1680 result = SCAN_EXCEED_NONE_PTE;
1681 } else {
1682 node = khugepaged_find_target_node();
1683 collapse_shmem(mm, mapping, start, hpage, node);
1684 }
1685 }
1686
1687 /* TODO: tracepoints */
1688}
1689#else
1690static void khugepaged_scan_shmem(struct mm_struct *mm,
1691 struct address_space *mapping,
1692 pgoff_t start, struct page **hpage)
1693{
1694 BUILD_BUG();
1695}
1696#endif
1697
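/*
 * khugepaged_scan_mm_slot() - scan up to @pages pte entries for collapse
 * candidates, resuming from the (mm, address) cursor in khugepaged_scan.
 *
 * khugepaged_mm_lock is dropped for the duration of the scan and re-taken
 * before returning; mmap_sem of the scanned mm is only try-locked, so a
 * contended mm is simply skipped until a later pass.  Returns the amount of
 * progress made, measured in pte entries (or vmas) examined.
 */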
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001698static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1699 struct page **hpage)
1700 __releases(&khugepaged_mm_lock)
1701 __acquires(&khugepaged_mm_lock)
1702{
1703 struct mm_slot *mm_slot;
1704 struct mm_struct *mm;
1705 struct vm_area_struct *vma;
1706 int progress = 0;
1707
1708 VM_BUG_ON(!pages);
1709 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1710
1711 if (khugepaged_scan.mm_slot)
1712 mm_slot = khugepaged_scan.mm_slot;
1713 else {
1714 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1715 struct mm_slot, mm_node);
1716 khugepaged_scan.address = 0;
1717 khugepaged_scan.mm_slot = mm_slot;
1718 }
1719 spin_unlock(&khugepaged_mm_lock);
1720
1721 mm = mm_slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08001722 /*
1723	 * Don't wait for the semaphore (to avoid long wait times); just move
1724	 * on to the next mm on the list.
1725 */
1726 vma = NULL;
1727 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1728 goto breakouterloop_mmap_sem;
1729 if (likely(!khugepaged_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001730 vma = find_vma(mm, khugepaged_scan.address);
1731
1732 progress++;
1733 for (; vma; vma = vma->vm_next) {
1734 unsigned long hstart, hend;
1735
1736 cond_resched();
1737 if (unlikely(khugepaged_test_exit(mm))) {
1738 progress++;
1739 break;
1740 }
Song Liu50f8b922018-08-17 15:47:00 -07001741 if (!hugepage_vma_check(vma, vma->vm_flags)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001742skip:
1743 progress++;
1744 continue;
1745 }
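		/*
		 * Only the PMD-aligned portion of the vma can hold huge pages:
		 * round vm_start up and vm_end down.  With 2MiB huge pages, for
		 * example, vm_start 0x201000 gives hstart 0x400000.
		 */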
1746 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1747 hend = vma->vm_end & HPAGE_PMD_MASK;
1748 if (hstart >= hend)
1749 goto skip;
1750 if (khugepaged_scan.address > hend)
1751 goto skip;
1752 if (khugepaged_scan.address < hstart)
1753 khugepaged_scan.address = hstart;
1754 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1755
1756 while (khugepaged_scan.address < hend) {
1757 int ret;
1758 cond_resched();
1759 if (unlikely(khugepaged_test_exit(mm)))
1760 goto breakouterloop;
1761
1762 VM_BUG_ON(khugepaged_scan.address < hstart ||
1763 khugepaged_scan.address + HPAGE_PMD_SIZE >
1764 hend);
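			/*
			 * shmem vmas are scanned through the page cache, which
			 * means dropping mmap_sem first; anonymous vmas are
			 * scanned through their page tables.
			 */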
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001765 if (shmem_file(vma->vm_file)) {
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001766 struct file *file;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001767 pgoff_t pgoff = linear_page_index(vma,
1768 khugepaged_scan.address);
Kirill A. Shutemove496cf32016-07-26 15:26:35 -07001769 if (!shmem_huge_enabled(vma))
1770 goto skip;
1771 file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001772 up_read(&mm->mmap_sem);
1773 ret = 1;
1774 khugepaged_scan_shmem(mm, file->f_mapping,
1775 pgoff, hpage);
1776 fput(file);
1777 } else {
1778 ret = khugepaged_scan_pmd(mm, vma,
1779 khugepaged_scan.address,
1780 hpage);
1781 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001782 /* move to next address */
1783 khugepaged_scan.address += HPAGE_PMD_SIZE;
1784 progress += HPAGE_PMD_NR;
1785 if (ret)
1786				/* we released mmap_sem, so break out of the loop */
1787 goto breakouterloop_mmap_sem;
1788 if (progress >= pages)
1789 goto breakouterloop;
1790 }
1791 }
1792breakouterloop:
1793 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1794breakouterloop_mmap_sem:
1795
1796 spin_lock(&khugepaged_mm_lock);
1797 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1798 /*
1799 * Release the current mm_slot if this mm is about to die, or
1800 * if we scanned all vmas of this mm.
1801 */
1802 if (khugepaged_test_exit(mm) || !vma) {
1803 /*
1804		 * Make sure that if mm_users reaches zero while khugepaged
1805		 * runs here, khugepaged_exit will find mm_slot no longer
1806		 * pointing to the exiting mm.
1807 */
1808 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1809 khugepaged_scan.mm_slot = list_entry(
1810 mm_slot->mm_node.next,
1811 struct mm_slot, mm_node);
1812 khugepaged_scan.address = 0;
1813 } else {
1814 khugepaged_scan.mm_slot = NULL;
1815 khugepaged_full_scans++;
1816 }
1817
1818 collect_mm_slot(mm_slot);
1819 }
1820
1821 return progress;
1822}
1823
1824static int khugepaged_has_work(void)
1825{
1826 return !list_empty(&khugepaged_scan.mm_head) &&
1827 khugepaged_enabled();
1828}
1829
1830static int khugepaged_wait_event(void)
1831{
1832 return !list_empty(&khugepaged_scan.mm_head) ||
1833 kthread_should_stop();
1834}
1835
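/*
 * khugepaged_do_scan() - one pass of up to khugepaged_pages_to_scan pte
 * entries, split across calls to khugepaged_scan_mm_slot().
 *
 * khugepaged_prealloc_page() manages the huge page handed down through
 * @hpage (its behaviour differs between NUMA and non-NUMA builds); the pass
 * ends early if that fails or if the thread is asked to stop or freeze.
 */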
1836static void khugepaged_do_scan(void)
1837{
1838 struct page *hpage = NULL;
1839 unsigned int progress = 0, pass_through_head = 0;
1840 unsigned int pages = khugepaged_pages_to_scan;
1841 bool wait = true;
1842
1843	barrier(); /* read khugepaged_pages_to_scan once into the local copy */
1844
1845 while (progress < pages) {
1846 if (!khugepaged_prealloc_page(&hpage, &wait))
1847 break;
1848
1849 cond_resched();
1850
1851 if (unlikely(kthread_should_stop() || try_to_freeze()))
1852 break;
1853
1854 spin_lock(&khugepaged_mm_lock);
1855 if (!khugepaged_scan.mm_slot)
1856 pass_through_head++;
1857 if (khugepaged_has_work() &&
1858 pass_through_head < 2)
1859 progress += khugepaged_scan_mm_slot(pages - progress,
1860 &hpage);
1861 else
1862 progress = pages;
1863 spin_unlock(&khugepaged_mm_lock);
1864 }
1865
1866 if (!IS_ERR_OR_NULL(hpage))
1867 put_page(hpage);
1868}
1869
1870static bool khugepaged_should_wakeup(void)
1871{
1872 return kthread_should_stop() ||
1873 time_after_eq(jiffies, khugepaged_sleep_expire);
1874}
1875
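/*
 * Sleep between scan passes: with work pending, sleep for
 * khugepaged_scan_sleep_millisecs (zero means no sleep at all); with no
 * work pending, sleep until an mm is registered or the thread is stopped.
 */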
1876static void khugepaged_wait_work(void)
1877{
1878 if (khugepaged_has_work()) {
1879 const unsigned long scan_sleep_jiffies =
1880 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1881
1882 if (!scan_sleep_jiffies)
1883 return;
1884
1885 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1886 wait_event_freezable_timeout(khugepaged_wait,
1887 khugepaged_should_wakeup(),
1888 scan_sleep_jiffies);
1889 return;
1890 }
1891
1892 if (khugepaged_enabled())
1893 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1894}
1895
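/*
 * Main loop of the khugepaged kernel thread: alternate scan passes with
 * freezable sleeps until kthread_stop(), then release whatever mm_slot the
 * scan cursor still points at.
 */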
1896static int khugepaged(void *none)
1897{
1898 struct mm_slot *mm_slot;
1899
1900 set_freezable();
1901 set_user_nice(current, MAX_NICE);
1902
1903 while (!kthread_should_stop()) {
1904 khugepaged_do_scan();
1905 khugepaged_wait_work();
1906 }
1907
1908 spin_lock(&khugepaged_mm_lock);
1909 mm_slot = khugepaged_scan.mm_slot;
1910 khugepaged_scan.mm_slot = NULL;
1911 if (mm_slot)
1912 collect_mm_slot(mm_slot);
1913 spin_unlock(&khugepaged_mm_lock);
1914 return 0;
1915}
1916
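/*
 * Raise min_free_kbytes enough to keep a couple of pageblocks per usable
 * zone free of foreign migratetypes, so that huge page allocations are less
 * likely to be defeated by fragmentation.  The reserve is capped at 5% of
 * lowmem and is only ever raised, never lowered.
 */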
1917static void set_recommended_min_free_kbytes(void)
1918{
1919 struct zone *zone;
1920 int nr_zones = 0;
1921 unsigned long recommended_min;
1922
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07001923 for_each_populated_zone(zone) {
1924 /*
1925 * We don't need to worry about fragmentation of
1926 * ZONE_MOVABLE since it only has movable pages.
1927 */
1928 if (zone_idx(zone) > gfp_zone(GFP_USER))
1929 continue;
1930
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001931 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07001932 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001933
1934 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1935 recommended_min = pageblock_nr_pages * nr_zones * 2;
1936
1937 /*
1938 * Make sure that on average at least two pageblocks are almost free
1939 * of another type, one for a migratetype to fall back to and a
1940	 * second to avoid subsequent fallbacks of other types.  There are 3
1941 * MIGRATE_TYPES we care about.
1942 */
1943 recommended_min += pageblock_nr_pages * nr_zones *
1944 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1945
1946	/* don't ever allow reserving more than 5% of lowmem */
1947 recommended_min = min(recommended_min,
1948 (unsigned long) nr_free_buffer_pages() / 20);
1949 recommended_min <<= (PAGE_SHIFT-10);
1950
1951 if (recommended_min > min_free_kbytes) {
1952 if (user_min_free_kbytes >= 0)
1953 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1954 min_free_kbytes, recommended_min);
1955
1956 min_free_kbytes = recommended_min;
1957 }
1958 setup_per_zone_wmarks();
1959}
1960
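/*
 * Create or stop the khugepaged thread to match the current enabled state.
 * Takes khugepaged_mutex itself, so callers (boot-time init, sysfs writes)
 * need no extra locking.
 */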
1961int start_stop_khugepaged(void)
1962{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001963 int err = 0;
1964
1965 mutex_lock(&khugepaged_mutex);
1966 if (khugepaged_enabled()) {
1967 if (!khugepaged_thread)
1968 khugepaged_thread = kthread_run(khugepaged, NULL,
1969 "khugepaged");
1970 if (IS_ERR(khugepaged_thread)) {
1971 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1972 err = PTR_ERR(khugepaged_thread);
1973 khugepaged_thread = NULL;
1974 goto fail;
1975 }
1976
1977 if (!list_empty(&khugepaged_scan.mm_head))
1978 wake_up_interruptible(&khugepaged_wait);
1979
1980 set_recommended_min_free_kbytes();
1981 } else if (khugepaged_thread) {
1982 kthread_stop(khugepaged_thread);
1983 khugepaged_thread = NULL;
1984 }
1985fail:
1986 mutex_unlock(&khugepaged_mutex);
1987 return err;
1988}
Vijay Balakrishna94c51672020-10-10 23:16:40 -07001989
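/*
 * Re-apply the recommended min_free_kbytes when something else has
 * recalculated the watermarks (memory hot-add, for instance) and may have
 * overwritten khugepaged's value; a no-op unless khugepaged is running.
 */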
1990void khugepaged_min_free_kbytes_update(void)
1991{
1992 mutex_lock(&khugepaged_mutex);
1993 if (khugepaged_enabled() && khugepaged_thread)
1994 set_recommended_min_free_kbytes();
1995 mutex_unlock(&khugepaged_mutex);
1996}