// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

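/*
 * Result codes for a single scan/collapse attempt; reported through the
 * huge_memory tracepoints so failures can be attributed to a cause.
 */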
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, as
 * would have happened if the vma had been large enough during the
 * page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of addresses queued in @pte_mapped_thp
 * @pte_mapped_thp: addresses of pte-mapped THPs awaiting collapse
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
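/*
 * The tunables below are exposed through the "khugepaged" attribute
 * group defined at the end of this section; the THP core registers it
 * under the transparent_hugepage kobject, i.e. they appear as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/<name>.
 */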
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

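/*
 * True once the last user of the mm has gone: khugepaged only holds an
 * mmgrab() reference (taken in __khugepaged_enter()), so mm_users == 0
 * means the address space is being torn down and must not be touched.
 */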
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

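/*
 * Decide whether a vma is eligible for collapse: honour the THP
 * enabled/madvise policy, reject MMF_DISABLE_THP mms, allow suitably
 * aligned shmem (and, with CONFIG_READ_ONLY_THP_FOR_FS, read-only file)
 * mappings, and refuse special mappings and temporary stacks.
 */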
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only
	 * mappings. It does not yet work on special mappings, and
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

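/*
 * Check and lock down the HPAGE_PMD_NR small pages behind @pte: each pte
 * must map a present, anonymous, non-compound page with no extra (gup)
 * references; at least one pte must be writable and at least one page
 * young/referenced. Pages that pass are locked and isolated from the LRU;
 * on failure everything is released. Returns 1 on success, 0 on failure.
 */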
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and the page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

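/*
 * Copy the isolated small pages into @page (the new huge page), clearing
 * destination subpages for pte_none/zero-pfn entries, and tear down the
 * old ptes and rmap as we go. The huge page is not mapped yet, so the
 * ptl is only needed for the per-pte bookkeeping.
 */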
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

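/*
 * With node reclaim enabled, refuse to mix source pages from NUMA nodes
 * that are further apart than node_reclaim_distance: collapsing them
 * would force remote accesses for part of the resulting huge page.
 */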
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
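/*
 * Pick the NUMA node to allocate the huge page from: the node that owns
 * most of the small pages seen by the last scan (khugepaged_node_load),
 * with round-robin tie-breaking across scans so a single node is not
 * always favoured when several have the same count.
 */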
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find the first node with the max hit count */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If mmap_sem was temporarily dropped, revalidate the vma
 * before retaking mmap_sem.
 * Returns 0 on success, otherwise a non-zero scan result code.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

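/*
 * Core anonymous collapse path. Roughly:
 *  1. drop mmap_sem and allocate/charge the huge page;
 *  2. retake mmap_sem for read, revalidate the vma and swap in any
 *     swapped-out ptes in the range;
 *  3. upgrade to mmap_sem for write, clear and flush the pmd, then
 *     isolate and copy the small pages;
 *  4. install the huge pmd and release everything, or re-populate the
 *     pmd from the old page table if isolation failed.
 */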
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = SCAN_ANY_PROCESS;
	if (!mmget_still_valid(mm))
		goto out;
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

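/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count none/zero and swap ptes against the max_ptes_* limits, record
 * per-node hits for allocation placement, and reject pages that are
 * non-anon, non-LRU, locked, compound or extra-referenced. On a
 * positive verdict, call collapse_huge_page(), which drops mmap_sem.
 */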
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit count.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and the page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

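/*
 * Release a slot once its mm has exited: unhash it, drop it from the
 * scan list and put the mm reference taken in __khugepaged_enter().
 */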
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
/*
 * Notify khugepaged that a given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

/**
 * collapse_pte_mapped_thp - try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage = NULL;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		return;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		if (!page || !PageCompound(page))
			goto abort;

		if (!hpage) {
			hpage = compound_head(page);
			/*
			 * The mapping of the THP should not change.
			 *
			 * Note that uprobe, debugger, or MAP_PRIVATE may
			 * change the page table, but the new page will
			 * not pass PageCompound() check.
			 */
			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
				goto abort;
		}

		/*
		 * Confirm the page maps to the correct subpage.
		 *
		 * Note that uprobe, debugger, or MAP_PRIVATE may change
		 * the page table, but the new page will not pass
		 * PageCompound() check.
		 */
		if (WARN_ON(hpage + i != page))
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (hpage) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, addr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!down_write_trylock(&mm->mmap_sem))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	up_write(&mm->mmap_sem);
	return 0;
}

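/*
 * After a file THP has replaced the small pages in the page cache, walk
 * all VMAs that map @pgoff in @mapping and retract the now-unneeded page
 * tables so the huge page can be faulted back in pmd-mapped.
 */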
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * down_write(mmap_sem) in, as the PMD-mapping is likely to
		 * be split later anyway.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set
		 * up after the check but before we take mmap_sem by the
		 * fault path. But the page lock would prevent establishing
		 * any new ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but check that
		 * the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. It has a higher chance
		 * to recover THP for the VMA, but has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_sem while holding the page lock. Fault path does it
		 * in reverse order. Trylock is a way to avoid deadlock.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * The basic scheme is simple; the details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan the page cache, replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
			  struct file *file, pgoff_t start,
			  struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			mem_cgroup_cancel_charge(new_page, memcg, true);
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  PAGE_SIZE);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fds, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

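		/*
		 * With the page locked, the mapping entry cannot change
		 * under us (truncation must lock the page first), so it
		 * is safe to retake the xarray lock and re-check the slot.
		 */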
		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

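	/*
	 * Every slot in the range now points at new_page: bump the THP
	 * statistics (shmem and other file pages are accounted
	 * separately) before dropping the xarray lock.
	 */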
	if (is_shmem)
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
	else {
		__inc_node_page_state(new_page, NR_FILE_THPS);
		filemap_nr_thps_inc(mapping);
	}

	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_node_page_state(zone->zone_pgdat,
					      NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

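	/*
	 * From here on the xarray lock is dropped. On success the old
	 * pages' contents are copied into new_page and the old pages
	 * are freed; on failure every change made above is rolled back.
	 */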
	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded; now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		mem_cgroup_commit_charge(new_page, memcg, false, true);

		if (is_shmem) {
			set_page_dirty(new_page);
			lru_cache_add_anon(new_page);
		} else {
			lru_cache_add_file(new_page);
		}
		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}

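/*
 * Scan one PMD-sized extent of @file starting at @start: count the
 * pages that are present and the swap entries, and bail out early on
 * compound pages, unexpected extra page references, pages off the LRU,
 * or a NUMA node we should not allocate from. If enough of the range
 * is populated, hand it to collapse_file().
 */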
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	return 0;
}
#endif

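/*
 * Walk the mm_slot list round-robin, scanning up to @pages worth of
 * PTEs across the eligible VMAs of one mm before moving to the next.
 * The scan position is kept in khugepaged_scan across calls, so a full
 * pass over all mms is made incrementally.
 */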
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				if (shmem_file(vma->vm_file)
				    && !shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

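/*
 * One scan pass: keep calling khugepaged_scan_mm_slot() until roughly
 * @pages PTEs have been examined. pass_through_head lets the loop
 * terminate once the mm list has been traversed from its head twice
 * without filling the quota.
 */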
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

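/*
 * Sleep between scan passes. The wait is freezable and bounded by
 * khugepaged_sleep_expire, so both kthread_stop() and timer expiry
 * wake the thread promptly.
 */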
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

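/*
 * Main loop of the khugepaged kernel thread: freezable, runs at the
 * lowest priority, and alternates between a bounded scan pass and a
 * (possibly indefinite) wait. On stop, the current mm_slot is released
 * so no exiting mm is left pinned.
 */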
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

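/*
 * Raise min_free_kbytes so that a couple of pageblocks per populated
 * zone stay free, keeping huge page allocation and anti-fragmentation
 * effective. As a rough worked example (assumed configuration, not
 * taken from this file): with 4KiB pages, 2MiB pageblocks (512 pages)
 * and two eligible zones, the reservation below comes to
 * 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages = 44 MiB, subject to
 * the 5%-of-lowmem cap.
 */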
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

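/*
 * Start or stop the khugepaged thread to match the current enablement
 * state, toggled from userspace, e.g. (illustrative):
 *   echo always > /sys/kernel/mm/transparent_hugepage/enabled
 * Starting an already-running thread or stopping an absent one is a
 * no-op thanks to the static task pointer and mutex.
 */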
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}