1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/mm.h>
4#include <linux/sched.h>
5#include <linux/mmu_notifier.h>
6#include <linux/rmap.h>
7#include <linux/swap.h>
8#include <linux/mm_inline.h>
9#include <linux/kthread.h>
10#include <linux/khugepaged.h>
11#include <linux/freezer.h>
12#include <linux/mman.h>
13#include <linux/hashtable.h>
14#include <linux/userfaultfd_k.h>
15#include <linux/page_idle.h>
16#include <linux/swapops.h>
17#include <linux/shmem_fs.h>
18
19#include <asm/tlb.h>
20#include <asm/pgalloc.h>
21#include "internal.h"
22
23enum scan_result {
24 SCAN_FAIL,
25 SCAN_SUCCEED,
26 SCAN_PMD_NULL,
27 SCAN_EXCEED_NONE_PTE,
28 SCAN_PTE_NON_PRESENT,
29 SCAN_PAGE_RO,
30        SCAN_LACK_REFERENCED_PAGE,
31        SCAN_PAGE_NULL,
32 SCAN_SCAN_ABORT,
33 SCAN_PAGE_COUNT,
34 SCAN_PAGE_LRU,
35 SCAN_PAGE_LOCK,
36 SCAN_PAGE_ANON,
37 SCAN_PAGE_COMPOUND,
38 SCAN_ANY_PROCESS,
39 SCAN_VMA_NULL,
40 SCAN_VMA_CHECK,
41 SCAN_ADDRESS_RANGE,
42 SCAN_SWAP_CACHE_PAGE,
43 SCAN_DEL_PAGE_LRU,
44 SCAN_ALLOC_HUGE_PAGE_FAIL,
45 SCAN_CGROUP_CHARGE_FAIL,
46        SCAN_EXCEED_SWAP_PTE,
47 SCAN_TRUNCATED,
48};
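/*
 * These result codes are reported through the huge_memory tracepoints
 * created below (see the trace_mm_khugepaged_scan_pmd() and
 * trace_mm_collapse_huge_page() calls later in this file), so each scan
 * or collapse attempt can be attributed to a specific failure reason.
 */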
49
50#define CREATE_TRACE_POINTS
51#include <trace/events/huge_memory.h>
52
53/* default scan 8*512 ptes (or vmas) every 10 seconds */
54static unsigned int khugepaged_pages_to_scan __read_mostly;
55static unsigned int khugepaged_pages_collapsed;
56static unsigned int khugepaged_full_scans;
57static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
58/* during fragmentation poll the hugepage allocator once every minute */
59static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
60static unsigned long khugepaged_sleep_expire;
61static DEFINE_SPINLOCK(khugepaged_mm_lock);
62static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
63/*
64 * By default, collapse hugepages if there is at least one pte mapped, as
65 * would have happened if the vma had been large enough at page-fault
66 * time.
67 */
68static unsigned int khugepaged_max_ptes_none __read_mostly;
69static unsigned int khugepaged_max_ptes_swap __read_mostly;
70
71#define MM_SLOTS_HASH_BITS 10
72static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
73
74static struct kmem_cache *mm_slot_cache __read_mostly;
75
76/**
77 * struct mm_slot - hash lookup from mm to mm_slot
78 * @hash: hash collision list
79 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
80 * @mm: the mm that this information is valid for
81 */
82struct mm_slot {
83 struct hlist_node hash;
84 struct list_head mm_node;
85 struct mm_struct *mm;
86};
87
88/**
89 * struct khugepaged_scan - cursor for scanning
90 * @mm_head: the head of the mm list to scan
91 * @mm_slot: the current mm_slot we are scanning
92 * @address: the next address inside that to be scanned
93 *
94 * There is only the one khugepaged_scan instance of this cursor structure.
95 */
96struct khugepaged_scan {
97 struct list_head mm_head;
98 struct mm_slot *mm_slot;
99 unsigned long address;
100};
101
102static struct khugepaged_scan khugepaged_scan = {
103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
104};
105
106#ifdef CONFIG_SYSFS
107static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
108 struct kobj_attribute *attr,
109 char *buf)
110{
111 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
112}
113
114static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
115 struct kobj_attribute *attr,
116 const char *buf, size_t count)
117{
118 unsigned long msecs;
119 int err;
120
121 err = kstrtoul(buf, 10, &msecs);
122 if (err || msecs > UINT_MAX)
123 return -EINVAL;
124
125 khugepaged_scan_sleep_millisecs = msecs;
126 khugepaged_sleep_expire = 0;
127 wake_up_interruptible(&khugepaged_wait);
128
129 return count;
130}
131static struct kobj_attribute scan_sleep_millisecs_attr =
132 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
133 scan_sleep_millisecs_store);
134
135static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
136 struct kobj_attribute *attr,
137 char *buf)
138{
139 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
140}
141
142static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
143 struct kobj_attribute *attr,
144 const char *buf, size_t count)
145{
146 unsigned long msecs;
147 int err;
148
149 err = kstrtoul(buf, 10, &msecs);
150 if (err || msecs > UINT_MAX)
151 return -EINVAL;
152
153 khugepaged_alloc_sleep_millisecs = msecs;
154 khugepaged_sleep_expire = 0;
155 wake_up_interruptible(&khugepaged_wait);
156
157 return count;
158}
159static struct kobj_attribute alloc_sleep_millisecs_attr =
160 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
161 alloc_sleep_millisecs_store);
162
163static ssize_t pages_to_scan_show(struct kobject *kobj,
164 struct kobj_attribute *attr,
165 char *buf)
166{
167 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
168}
169static ssize_t pages_to_scan_store(struct kobject *kobj,
170 struct kobj_attribute *attr,
171 const char *buf, size_t count)
172{
173 int err;
174 unsigned long pages;
175
176 err = kstrtoul(buf, 10, &pages);
177 if (err || !pages || pages > UINT_MAX)
178 return -EINVAL;
179
180 khugepaged_pages_to_scan = pages;
181
182 return count;
183}
184static struct kobj_attribute pages_to_scan_attr =
185 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
186 pages_to_scan_store);
187
188static ssize_t pages_collapsed_show(struct kobject *kobj,
189 struct kobj_attribute *attr,
190 char *buf)
191{
192 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
193}
194static struct kobj_attribute pages_collapsed_attr =
195 __ATTR_RO(pages_collapsed);
196
197static ssize_t full_scans_show(struct kobject *kobj,
198 struct kobj_attribute *attr,
199 char *buf)
200{
201 return sprintf(buf, "%u\n", khugepaged_full_scans);
202}
203static struct kobj_attribute full_scans_attr =
204 __ATTR_RO(full_scans);
205
206static ssize_t khugepaged_defrag_show(struct kobject *kobj,
207 struct kobj_attribute *attr, char *buf)
208{
209 return single_hugepage_flag_show(kobj, attr, buf,
210 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
211}
212static ssize_t khugepaged_defrag_store(struct kobject *kobj,
213 struct kobj_attribute *attr,
214 const char *buf, size_t count)
215{
216 return single_hugepage_flag_store(kobj, attr, buf, count,
217 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
218}
219static struct kobj_attribute khugepaged_defrag_attr =
220 __ATTR(defrag, 0644, khugepaged_defrag_show,
221 khugepaged_defrag_store);
222
223/*
224 * max_ptes_none controls whether khugepaged should collapse hugepages
225 * over unmapped ptes, which can increase the memory footprint of the
226 * vmas it scans. When max_ptes_none is 0, khugepaged will not reduce
227 * the available free memory in the system as it runs. Increasing
228 * max_ptes_none will instead potentially reduce the free memory in the
229 * system during the khugepaged scan.
230 */
231static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
232 struct kobj_attribute *attr,
233 char *buf)
234{
235 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
236}
237static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
238 struct kobj_attribute *attr,
239 const char *buf, size_t count)
240{
241 int err;
242 unsigned long max_ptes_none;
243
244 err = kstrtoul(buf, 10, &max_ptes_none);
245 if (err || max_ptes_none > HPAGE_PMD_NR-1)
246 return -EINVAL;
247
248 khugepaged_max_ptes_none = max_ptes_none;
249
250 return count;
251}
252static struct kobj_attribute khugepaged_max_ptes_none_attr =
253 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
254 khugepaged_max_ptes_none_store);
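/*
 * A worked example, assuming 4 KiB base pages (HPAGE_PMD_NR == 512):
 * max_ptes_none can be set between 0 and 511 via
 * /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none.
 * Writing 0 makes khugepaged collapse only ranges whose 512 ptes are
 * already populated, while the default of HPAGE_PMD_NR - 1 (511) lets a
 * single mapped pte justify a collapse.
 */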
255
256static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
257 struct kobj_attribute *attr,
258 char *buf)
259{
260 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
261}
262
263static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
264 struct kobj_attribute *attr,
265 const char *buf, size_t count)
266{
267 int err;
268 unsigned long max_ptes_swap;
269
270 err = kstrtoul(buf, 10, &max_ptes_swap);
271 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
272 return -EINVAL;
273
274 khugepaged_max_ptes_swap = max_ptes_swap;
275
276 return count;
277}
278
279static struct kobj_attribute khugepaged_max_ptes_swap_attr =
280 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
281 khugepaged_max_ptes_swap_store);
282
283static struct attribute *khugepaged_attr[] = {
284 &khugepaged_defrag_attr.attr,
285 &khugepaged_max_ptes_none_attr.attr,
286 &pages_to_scan_attr.attr,
287 &pages_collapsed_attr.attr,
288 &full_scans_attr.attr,
289 &scan_sleep_millisecs_attr.attr,
290 &alloc_sleep_millisecs_attr.attr,
291 &khugepaged_max_ptes_swap_attr.attr,
292 NULL,
293};
294
295struct attribute_group khugepaged_attr_group = {
296 .attrs = khugepaged_attr,
297 .name = "khugepaged",
298};
299#endif /* CONFIG_SYSFS */
300
301#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
302
303int hugepage_madvise(struct vm_area_struct *vma,
304 unsigned long *vm_flags, int advice)
305{
306 switch (advice) {
307 case MADV_HUGEPAGE:
308#ifdef CONFIG_S390
309 /*
310 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
311 * can't handle this properly after s390_enable_sie, so we simply
312 * ignore the madvise to prevent qemu from causing a SIGSEGV.
313 */
314 if (mm_has_pgste(vma->vm_mm))
315 return 0;
316#endif
317 *vm_flags &= ~VM_NOHUGEPAGE;
318 *vm_flags |= VM_HUGEPAGE;
319 /*
320                 * If the vma becomes suitable for khugepaged to scan,
321                 * register it here without waiting for a page fault that
322                 * may not happen any time soon.
323 */
324 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
325 khugepaged_enter_vma_merge(vma, *vm_flags))
326 return -ENOMEM;
327 break;
328 case MADV_NOHUGEPAGE:
329 *vm_flags &= ~VM_HUGEPAGE;
330 *vm_flags |= VM_NOHUGEPAGE;
331 /*
332 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
333 * this vma even if we leave the mm registered in khugepaged if
334 * it got registered before VM_NOHUGEPAGE was set.
335 */
336 break;
337 }
338
339 return 0;
340}
341
342int __init khugepaged_init(void)
343{
344 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
345 sizeof(struct mm_slot),
346 __alignof__(struct mm_slot), 0, NULL);
347 if (!mm_slot_cache)
348 return -ENOMEM;
349
350 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
351 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
352 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
353
354 return 0;
355}
356
357void __init khugepaged_destroy(void)
358{
359 kmem_cache_destroy(mm_slot_cache);
360}
361
362static inline struct mm_slot *alloc_mm_slot(void)
363{
364 if (!mm_slot_cache) /* initialization failed */
365 return NULL;
366 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
367}
368
369static inline void free_mm_slot(struct mm_slot *mm_slot)
370{
371 kmem_cache_free(mm_slot_cache, mm_slot);
372}
373
374static struct mm_slot *get_mm_slot(struct mm_struct *mm)
375{
376 struct mm_slot *mm_slot;
377
378 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
379 if (mm == mm_slot->mm)
380 return mm_slot;
381
382 return NULL;
383}
384
385static void insert_to_mm_slots_hash(struct mm_struct *mm,
386 struct mm_slot *mm_slot)
387{
388 mm_slot->mm = mm;
389 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
390}
391
392static inline int khugepaged_test_exit(struct mm_struct *mm)
393{
394 return atomic_read(&mm->mm_users) == 0;
395}
396
397int __khugepaged_enter(struct mm_struct *mm)
398{
399 struct mm_slot *mm_slot;
400 int wakeup;
401
402 mm_slot = alloc_mm_slot();
403 if (!mm_slot)
404 return -ENOMEM;
405
406 /* __khugepaged_exit() must not run from under us */
407 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
408 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
409 free_mm_slot(mm_slot);
410 return 0;
411 }
412
413 spin_lock(&khugepaged_mm_lock);
414 insert_to_mm_slots_hash(mm, mm_slot);
415 /*
416 * Insert just behind the scanning cursor, to let the area settle
417 * down a little.
418 */
419 wakeup = list_empty(&khugepaged_scan.mm_head);
420 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
421 spin_unlock(&khugepaged_mm_lock);
422
423 atomic_inc(&mm->mm_count);
424 if (wakeup)
425 wake_up_interruptible(&khugepaged_wait);
426
427 return 0;
428}
429
430int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
431 unsigned long vm_flags)
432{
433 unsigned long hstart, hend;
434 if (!vma->anon_vma)
435 /*
436 * Not yet faulted in so we will register later in the
437 * page fault if needed.
438 */
439 return 0;
440 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
441 /* khugepaged not yet working on file or special mappings */
442 return 0;
443 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
444 hend = vma->vm_end & HPAGE_PMD_MASK;
445 if (hstart < hend)
446 return khugepaged_enter(vma, vm_flags);
447 return 0;
448}
449
450void __khugepaged_exit(struct mm_struct *mm)
451{
452 struct mm_slot *mm_slot;
453 int free = 0;
454
455 spin_lock(&khugepaged_mm_lock);
456 mm_slot = get_mm_slot(mm);
457 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
458 hash_del(&mm_slot->hash);
459 list_del(&mm_slot->mm_node);
460 free = 1;
461 }
462 spin_unlock(&khugepaged_mm_lock);
463
464 if (free) {
465 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
466 free_mm_slot(mm_slot);
467 mmdrop(mm);
468 } else if (mm_slot) {
469 /*
470 * This is required to serialize against
471 * khugepaged_test_exit() (which is guaranteed to run
472 * under mmap sem read mode). Stop here (after we
473 * return all pagetables will be destroyed) until
474 * khugepaged has finished working on the pagetables
475 * under the mmap_sem.
476 */
477 down_write(&mm->mmap_sem);
478 up_write(&mm->mmap_sem);
479 }
480}
481
482static void release_pte_page(struct page *page)
483{
484 /* 0 stands for page_is_file_cache(page) == false */
485        dec_node_page_state(page, NR_ISOLATED_ANON + 0);
486        unlock_page(page);
487 putback_lru_page(page);
488}
489
490static void release_pte_pages(pte_t *pte, pte_t *_pte)
491{
492 while (--_pte >= pte) {
493 pte_t pteval = *_pte;
494 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
495 release_pte_page(pte_page(pteval));
496 }
497}
498
499static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
500 unsigned long address,
501 pte_t *pte)
502{
503 struct page *page = NULL;
504 pte_t *_pte;
505        int none_or_zero = 0, result = 0, referenced = 0;
506 bool writable = false;
507
508 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
509 _pte++, address += PAGE_SIZE) {
510 pte_t pteval = *_pte;
511 if (pte_none(pteval) || (pte_present(pteval) &&
512 is_zero_pfn(pte_pfn(pteval)))) {
513 if (!userfaultfd_armed(vma) &&
514 ++none_or_zero <= khugepaged_max_ptes_none) {
515 continue;
516 } else {
517 result = SCAN_EXCEED_NONE_PTE;
518 goto out;
519 }
520 }
521 if (!pte_present(pteval)) {
522 result = SCAN_PTE_NON_PRESENT;
523 goto out;
524 }
525 page = vm_normal_page(vma, address, pteval);
526 if (unlikely(!page)) {
527 result = SCAN_PAGE_NULL;
528 goto out;
529 }
530
531 VM_BUG_ON_PAGE(PageCompound(page), page);
532 VM_BUG_ON_PAGE(!PageAnon(page), page);
533 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
534
535 /*
536 * We can do it before isolate_lru_page because the
537 * page can't be freed from under us. NOTE: PG_lock
538 * is needed to serialize against split_huge_page
539 * when invoked from the VM.
540 */
541 if (!trylock_page(page)) {
542 result = SCAN_PAGE_LOCK;
543 goto out;
544 }
545
546 /*
547 * cannot use mapcount: can't collapse if there's a gup pin.
548 * The page must only be referenced by the scanned process
549 * and page swap cache.
550 */
551 if (page_count(page) != 1 + !!PageSwapCache(page)) {
552 unlock_page(page);
553 result = SCAN_PAGE_COUNT;
554 goto out;
555 }
556 if (pte_write(pteval)) {
557 writable = true;
558 } else {
559 if (PageSwapCache(page) &&
560 !reuse_swap_page(page, NULL)) {
561 unlock_page(page);
562 result = SCAN_SWAP_CACHE_PAGE;
563 goto out;
564 }
565 /*
566 * Page is not in the swap cache. It can be collapsed
567 * into a THP.
568 */
569 }
570
571 /*
572                 * Isolate the page to avoid collapsing a hugepage
573 * currently in use by the VM.
574 */
575 if (isolate_lru_page(page)) {
576 unlock_page(page);
577 result = SCAN_DEL_PAGE_LRU;
578 goto out;
579 }
580 /* 0 stands for page_is_file_cache(page) == false */
581                inc_node_page_state(page, NR_ISOLATED_ANON + 0);
582                VM_BUG_ON_PAGE(!PageLocked(page), page);
583 VM_BUG_ON_PAGE(PageLRU(page), page);
584
585                /* There should be enough young ptes to collapse the page */
586                if (pte_young(pteval) ||
587 page_is_young(page) || PageReferenced(page) ||
588 mmu_notifier_test_young(vma->vm_mm, address))
589                        referenced++;
590        }
591 if (likely(writable)) {
592 if (likely(referenced)) {
593 result = SCAN_SUCCEED;
594 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
595 referenced, writable, result);
596 return 1;
597 }
598 } else {
599 result = SCAN_PAGE_RO;
600 }
601
602out:
603 release_pte_pages(pte, _pte);
604 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
605 referenced, writable, result);
606 return 0;
607}
608
609static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
610 struct vm_area_struct *vma,
611 unsigned long address,
612 spinlock_t *ptl)
613{
614 pte_t *_pte;
615 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
616 pte_t pteval = *_pte;
617 struct page *src_page;
618
619 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
620 clear_user_highpage(page, address);
621 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
622 if (is_zero_pfn(pte_pfn(pteval))) {
623 /*
624 * ptl mostly unnecessary.
625 */
626 spin_lock(ptl);
627 /*
628 * paravirt calls inside pte_clear here are
629 * superfluous.
630 */
631 pte_clear(vma->vm_mm, address, _pte);
632 spin_unlock(ptl);
633 }
634 } else {
635 src_page = pte_page(pteval);
636 copy_user_highpage(page, src_page, address, vma);
637 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
638 release_pte_page(src_page);
639 /*
640 * ptl mostly unnecessary, but preempt has to
641 * be disabled to update the per-cpu stats
642 * inside page_remove_rmap().
643 */
644 spin_lock(ptl);
645 /*
646 * paravirt calls inside pte_clear here are
647 * superfluous.
648 */
649 pte_clear(vma->vm_mm, address, _pte);
650 page_remove_rmap(src_page, false);
651 spin_unlock(ptl);
652 free_page_and_swap_cache(src_page);
653 }
654
655 address += PAGE_SIZE;
656 page++;
657 }
658}
659
660static void khugepaged_alloc_sleep(void)
661{
662 DEFINE_WAIT(wait);
663
664 add_wait_queue(&khugepaged_wait, &wait);
665 freezable_schedule_timeout_interruptible(
666 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
667 remove_wait_queue(&khugepaged_wait, &wait);
668}
669
670static int khugepaged_node_load[MAX_NUMNODES];
671
672static bool khugepaged_scan_abort(int nid)
673{
674 int i;
675
676 /*
677         * If node_reclaim_mode is disabled, then no extra effort is made to
678         * allocate memory locally.
679         */
680        if (!node_reclaim_mode)
681                return false;
682
683 /* If there is a count for this node already, it must be acceptable */
684 if (khugepaged_node_load[nid])
685 return false;
686
687 for (i = 0; i < MAX_NUMNODES; i++) {
688 if (!khugepaged_node_load[i])
689 continue;
690 if (node_distance(nid, i) > RECLAIM_DISTANCE)
691 return true;
692 }
693 return false;
694}
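/*
 * An illustrative example with assumed values: with the default
 * RECLAIM_DISTANCE of 30 and pages seen so far only on node 0, a page
 * found on a node at node_distance() 21 lets the scan continue, while a
 * page on a node at distance 40 aborts it, so khugepaged avoids
 * collapsing ranges whose memory is spread across distant NUMA nodes.
 */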
695
696/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
697static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
698{
699        return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
700}
701
702#ifdef CONFIG_NUMA
703static int khugepaged_find_target_node(void)
704{
705 static int last_khugepaged_target_node = NUMA_NO_NODE;
706 int nid, target_node = 0, max_value = 0;
707
708 /* find first node with max normal pages hit */
709 for (nid = 0; nid < MAX_NUMNODES; nid++)
710 if (khugepaged_node_load[nid] > max_value) {
711 max_value = khugepaged_node_load[nid];
712 target_node = nid;
713 }
714
715 /* do some balance if several nodes have the same hit record */
716 if (target_node <= last_khugepaged_target_node)
717 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
718 nid++)
719 if (max_value == khugepaged_node_load[nid]) {
720 target_node = nid;
721 break;
722 }
723
724 last_khugepaged_target_node = target_node;
725 return target_node;
726}
727
728static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
729{
730 if (IS_ERR(*hpage)) {
731 if (!*wait)
732 return false;
733
734 *wait = false;
735 *hpage = NULL;
736 khugepaged_alloc_sleep();
737 } else if (*hpage) {
738 put_page(*hpage);
739 *hpage = NULL;
740 }
741
742 return true;
743}
744
745static struct page *
746khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
747{
748 VM_BUG_ON_PAGE(*hpage, *hpage);
749
750        *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
751 if (unlikely(!*hpage)) {
752 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
753 *hpage = ERR_PTR(-ENOMEM);
754 return NULL;
755 }
756
757 prep_transhuge_page(*hpage);
758 count_vm_event(THP_COLLAPSE_ALLOC);
759 return *hpage;
760}
761#else
762static int khugepaged_find_target_node(void)
763{
764 return 0;
765}
766
767static inline struct page *alloc_khugepaged_hugepage(void)
768{
769 struct page *page;
770
771 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
772 HPAGE_PMD_ORDER);
773 if (page)
774 prep_transhuge_page(page);
775 return page;
776}
777
778static struct page *khugepaged_alloc_hugepage(bool *wait)
779{
780 struct page *hpage;
781
782 do {
783 hpage = alloc_khugepaged_hugepage();
784 if (!hpage) {
785 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
786 if (!*wait)
787 return NULL;
788
789 *wait = false;
790 khugepaged_alloc_sleep();
791 } else
792 count_vm_event(THP_COLLAPSE_ALLOC);
793 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
794
795 return hpage;
796}
797
798static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
799{
800 if (!*hpage)
801 *hpage = khugepaged_alloc_hugepage(wait);
802
803 if (unlikely(!*hpage))
804 return false;
805
806 return true;
807}
808
809static struct page *
810khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
811{
812        VM_BUG_ON(!*hpage);
813
814 return *hpage;
815}
816#endif
817
818static bool hugepage_vma_check(struct vm_area_struct *vma)
819{
820 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
821 (vma->vm_flags & VM_NOHUGEPAGE))
822 return false;
823        if (shmem_file(vma->vm_file)) {
824                if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
825                        return false;
826                return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
827 HPAGE_PMD_NR);
828 }
829        if (!vma->anon_vma || vma->vm_ops)
830 return false;
831 if (is_vma_temporary_stack(vma))
832 return false;
833 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
834}
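/*
 * Summarizing the checks above: the vma must allow hugepages
 * (VM_HUGEPAGE set or "always" mode, and VM_NOHUGEPAGE clear) and must
 * either be shmem-backed with CONFIG_TRANSPARENT_HUGE_PAGECACHE enabled
 * and hugepage-aligned, or be an anonymous vma that is not a temporary
 * stack and carries none of the VM_NO_KHUGEPAGED flags.
 */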
835
836/*
837 * If mmap_sem temporarily dropped, revalidate vma
838 * before taking mmap_sem.
839 * Return 0 if it succeeds, otherwise return a nonzero
840 * value (scan code).
841 */
842
843static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
844 struct vm_area_struct **vmap)
845{
846 struct vm_area_struct *vma;
847 unsigned long hstart, hend;
848
849 if (unlikely(khugepaged_test_exit(mm)))
850 return SCAN_ANY_PROCESS;
851
852        *vmap = vma = find_vma(mm, address);
853        if (!vma)
854 return SCAN_VMA_NULL;
855
856 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
857 hend = vma->vm_end & HPAGE_PMD_MASK;
858 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
859 return SCAN_ADDRESS_RANGE;
860 if (!hugepage_vma_check(vma))
861 return SCAN_VMA_CHECK;
862 return 0;
863}
864
865/*
866 * Bring missing pages in from swap, to complete THP collapse.
867 * Only done if khugepaged_scan_pmd believes it is worthwhile.
868 *
869 * Called and returns without pte mapped or spinlocks held,
870 * but with mmap_sem held to protect against vma changes.
871 */
872
873static bool __collapse_huge_page_swapin(struct mm_struct *mm,
874 struct vm_area_struct *vma,
875                                        unsigned long address, pmd_t *pmd,
876 int referenced)
877{
878        int swapped_in = 0, ret = 0;
879        struct vm_fault vmf = {
880                .vma = vma,
881 .address = address,
882 .flags = FAULT_FLAG_ALLOW_RETRY,
883 .pmd = pmd,
884                .pgoff = linear_page_index(vma, address),
885        };
886
887        /* we only decide to swap in if there are enough young ptes */
888 if (referenced < HPAGE_PMD_NR/2) {
889 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
890 return false;
891 }
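        /*
         * Assuming 4 KiB base pages, HPAGE_PMD_NR/2 above is 256: swap-in
         * is only attempted when at least half of the 512 ptes in the
         * range were recently referenced, since faulting pages back in is
         * expensive.
         */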
892        vmf.pte = pte_offset_map(pmd, address);
893 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
894 vmf.pte++, vmf.address += PAGE_SIZE) {
895                vmf.orig_pte = *vmf.pte;
896                if (!is_swap_pte(vmf.orig_pte))
897                        continue;
898 swapped_in++;
899                ret = do_swap_page(&vmf);
900
901                /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
902 if (ret & VM_FAULT_RETRY) {
903 down_read(&mm->mmap_sem);
904                        if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
905                                /* vma is no longer available, don't continue to swapin */
906                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
907                                return false;
908                        }
909                        /* check if the pmd is still valid */
910 if (mm_find_pmd(mm, address) != pmd)
911 return false;
912 }
913 if (ret & VM_FAULT_ERROR) {
914                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
915                        return false;
916 }
917 /* pte is unmapped now, we need to map it */
918                vmf.pte = pte_offset_map(pmd, vmf.address);
919        }
920        vmf.pte--;
921 pte_unmap(vmf.pte);
922        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
923        return true;
924}
925
926static void collapse_huge_page(struct mm_struct *mm,
927 unsigned long address,
928 struct page **hpage,
929                               int node, int referenced)
930{
931 pmd_t *pmd, _pmd;
932 pte_t *pte;
933 pgtable_t pgtable;
934 struct page *new_page;
935 spinlock_t *pmd_ptl, *pte_ptl;
936 int isolated = 0, result = 0;
937 struct mem_cgroup *memcg;
938        struct vm_area_struct *vma;
939        unsigned long mmun_start;        /* For mmu_notifiers */
940 unsigned long mmun_end; /* For mmu_notifiers */
941 gfp_t gfp;
942
943 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
944
945 /* Only allocate from the target node */
946 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
947
948        /*
949 * Before allocating the hugepage, release the mmap_sem read lock.
950 * The allocation can take potentially a long time if it involves
951 * sync compaction, and we do not need to hold the mmap_sem during
952 * that. We will recheck the vma after taking it again in write mode.
953 */
954 up_read(&mm->mmap_sem);
955 new_page = khugepaged_alloc_page(hpage, gfp, node);
956        if (!new_page) {
957 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
958 goto out_nolock;
959 }
960
961 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
962 result = SCAN_CGROUP_CHARGE_FAIL;
963 goto out_nolock;
964 }
965
966 down_read(&mm->mmap_sem);
967        result = hugepage_vma_revalidate(mm, address, &vma);
968        if (result) {
969 mem_cgroup_cancel_charge(new_page, memcg, true);
970 up_read(&mm->mmap_sem);
971 goto out_nolock;
972 }
973
974 pmd = mm_find_pmd(mm, address);
975 if (!pmd) {
976 result = SCAN_PMD_NULL;
977 mem_cgroup_cancel_charge(new_page, memcg, true);
978 up_read(&mm->mmap_sem);
979 goto out_nolock;
980 }
981
982 /*
983 * __collapse_huge_page_swapin always returns with mmap_sem locked.
984         * If it fails, we release mmap_sem and jump to out_nolock.
985         * Continuing to collapse causes inconsistency.
986 */
987        if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
988                mem_cgroup_cancel_charge(new_page, memcg, true);
989 up_read(&mm->mmap_sem);
990 goto out_nolock;
991 }
992
993 up_read(&mm->mmap_sem);
994 /*
995 * Prevent all access to pagetables with the exception of
996 * gup_fast later handled by the ptep_clear_flush and the VM
997 * handled by the anon_vma lock + PG_lock.
998 */
999 down_write(&mm->mmap_sem);
1000        result = hugepage_vma_revalidate(mm, address, &vma);
1001        if (result)
1002 goto out;
1003 /* check if the pmd is still valid */
1004 if (mm_find_pmd(mm, address) != pmd)
1005 goto out;
1006
1007 anon_vma_lock_write(vma->anon_vma);
1008
1009 pte = pte_offset_map(pmd, address);
1010 pte_ptl = pte_lockptr(mm, pmd);
1011
1012 mmun_start = address;
1013 mmun_end = address + HPAGE_PMD_SIZE;
1014 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1015 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1016 /*
1017 * After this gup_fast can't run anymore. This also removes
1018 * any huge TLB entry from the CPU so we won't allow
1019 * huge and small TLB entries for the same virtual address
1020 * to avoid the risk of CPU bugs in that area.
1021 */
1022 _pmd = pmdp_collapse_flush(vma, address, pmd);
1023 spin_unlock(pmd_ptl);
1024 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1025
1026 spin_lock(pte_ptl);
1027 isolated = __collapse_huge_page_isolate(vma, address, pte);
1028 spin_unlock(pte_ptl);
1029
1030 if (unlikely(!isolated)) {
1031 pte_unmap(pte);
1032 spin_lock(pmd_ptl);
1033 BUG_ON(!pmd_none(*pmd));
1034 /*
1035 * We can only use set_pmd_at when establishing
1036 * hugepmds and never for establishing regular pmds that
1037 * points to regular pagetables. Use pmd_populate for that
1038 */
1039 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1040 spin_unlock(pmd_ptl);
1041 anon_vma_unlock_write(vma->anon_vma);
1042 result = SCAN_FAIL;
1043 goto out;
1044 }
1045
1046 /*
1047 * All pages are isolated and locked so anon_vma rmap
1048 * can't run anymore.
1049 */
1050 anon_vma_unlock_write(vma->anon_vma);
1051
1052 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1053 pte_unmap(pte);
1054 __SetPageUptodate(new_page);
1055 pgtable = pmd_pgtable(_pmd);
1056
1057 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1058 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1059
1060 /*
1061 * spin_lock() below is not the equivalent of smp_wmb(), so
1062 * this is needed to avoid the copy_huge_page writes to become
1063 * visible after the set_pmd_at() write.
1064 */
1065 smp_wmb();
1066
1067 spin_lock(pmd_ptl);
1068 BUG_ON(!pmd_none(*pmd));
1069 page_add_new_anon_rmap(new_page, vma, address, true);
1070 mem_cgroup_commit_charge(new_page, memcg, false, true);
1071 lru_cache_add_active_or_unevictable(new_page, vma);
1072 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1073 set_pmd_at(mm, address, pmd, _pmd);
1074 update_mmu_cache_pmd(vma, address, pmd);
1075 spin_unlock(pmd_ptl);
1076
1077 *hpage = NULL;
1078
1079 khugepaged_pages_collapsed++;
1080 result = SCAN_SUCCEED;
1081out_up_write:
1082 up_write(&mm->mmap_sem);
1083out_nolock:
1084 trace_mm_collapse_huge_page(mm, isolated, result);
1085 return;
1086out:
1087 mem_cgroup_cancel_charge(new_page, memcg, true);
1088 goto out_up_write;
1089}
1090
1091static int khugepaged_scan_pmd(struct mm_struct *mm,
1092 struct vm_area_struct *vma,
1093 unsigned long address,
1094 struct page **hpage)
1095{
1096 pmd_t *pmd;
1097 pte_t *pte, *_pte;
1098        int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1099        struct page *page = NULL;
1100 unsigned long _address;
1101 spinlock_t *ptl;
1102 int node = NUMA_NO_NODE, unmapped = 0;
1103        bool writable = false;
1104
1105 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1106
1107 pmd = mm_find_pmd(mm, address);
1108 if (!pmd) {
1109 result = SCAN_PMD_NULL;
1110 goto out;
1111 }
1112
1113 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1114 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1115 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1116 _pte++, _address += PAGE_SIZE) {
1117 pte_t pteval = *_pte;
1118 if (is_swap_pte(pteval)) {
1119 if (++unmapped <= khugepaged_max_ptes_swap) {
1120 continue;
1121 } else {
1122 result = SCAN_EXCEED_SWAP_PTE;
1123 goto out_unmap;
1124 }
1125 }
1126 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1127 if (!userfaultfd_armed(vma) &&
1128 ++none_or_zero <= khugepaged_max_ptes_none) {
1129 continue;
1130 } else {
1131 result = SCAN_EXCEED_NONE_PTE;
1132 goto out_unmap;
1133 }
1134 }
1135 if (!pte_present(pteval)) {
1136 result = SCAN_PTE_NON_PRESENT;
1137 goto out_unmap;
1138 }
1139 if (pte_write(pteval))
1140 writable = true;
1141
1142 page = vm_normal_page(vma, _address, pteval);
1143 if (unlikely(!page)) {
1144 result = SCAN_PAGE_NULL;
1145 goto out_unmap;
1146 }
1147
1148 /* TODO: teach khugepaged to collapse THP mapped with pte */
1149 if (PageCompound(page)) {
1150 result = SCAN_PAGE_COMPOUND;
1151 goto out_unmap;
1152 }
1153
1154 /*
1155 * Record which node the original page is from and save this
1156 * information to khugepaged_node_load[].
1157                 * Khugepaged will allocate the hugepage from the node that
1158                 * has the max hit record.
1159 */
1160 node = page_to_nid(page);
1161 if (khugepaged_scan_abort(node)) {
1162 result = SCAN_SCAN_ABORT;
1163 goto out_unmap;
1164 }
1165 khugepaged_node_load[node]++;
1166 if (!PageLRU(page)) {
1167 result = SCAN_PAGE_LRU;
1168 goto out_unmap;
1169 }
1170 if (PageLocked(page)) {
1171 result = SCAN_PAGE_LOCK;
1172 goto out_unmap;
1173 }
1174 if (!PageAnon(page)) {
1175 result = SCAN_PAGE_ANON;
1176 goto out_unmap;
1177 }
1178
1179 /*
1180 * cannot use mapcount: can't collapse if there's a gup pin.
1181 * The page must only be referenced by the scanned process
1182 * and page swap cache.
1183 */
1184 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1185 result = SCAN_PAGE_COUNT;
1186 goto out_unmap;
1187 }
1188 if (pte_young(pteval) ||
1189 page_is_young(page) || PageReferenced(page) ||
1190 mmu_notifier_test_young(vma->vm_mm, address))
1191                        referenced++;
1192        }
1193 if (writable) {
1194 if (referenced) {
1195 result = SCAN_SUCCEED;
1196 ret = 1;
1197 } else {
1198                        result = SCAN_LACK_REFERENCED_PAGE;
1199                }
1200 } else {
1201 result = SCAN_PAGE_RO;
1202 }
1203out_unmap:
1204 pte_unmap_unlock(pte, ptl);
1205 if (ret) {
1206 node = khugepaged_find_target_node();
1207 /* collapse_huge_page will return with the mmap_sem released */
1208                collapse_huge_page(mm, address, hpage, node, referenced);
1209        }
1210out:
1211 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1212 none_or_zero, result, unmapped);
1213 return ret;
1214}
1215
1216static void collect_mm_slot(struct mm_slot *mm_slot)
1217{
1218 struct mm_struct *mm = mm_slot->mm;
1219
1220 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1221
1222 if (khugepaged_test_exit(mm)) {
1223 /* free mm_slot */
1224 hash_del(&mm_slot->hash);
1225 list_del(&mm_slot->mm_node);
1226
1227 /*
1228 * Not strictly needed because the mm exited already.
1229 *
1230 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1231 */
1232
1233 /* khugepaged_mm_lock actually not necessary for the below */
1234 free_mm_slot(mm_slot);
1235 mmdrop(mm);
1236 }
1237}
1238
1239#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1240static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1241{
1242 struct vm_area_struct *vma;
1243 unsigned long addr;
1244 pmd_t *pmd, _pmd;
1245        bool deposited = false;
1246
1247 i_mmap_lock_write(mapping);
1248 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1249 /* probably overkill */
1250 if (vma->anon_vma)
1251 continue;
1252 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1253 if (addr & ~HPAGE_PMD_MASK)
1254 continue;
1255 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1256 continue;
1257 pmd = mm_find_pmd(vma->vm_mm, addr);
1258 if (!pmd)
1259 continue;
1260 /*
1261 * We need exclusive mmap_sem to retract page table.
1262 * If trylock fails we would end up with pte-mapped THP after
1263 * re-fault. Not ideal, but it's more important to not disturb
1264 * the system too much.
1265 */
1266 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1267 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1268 /* assume page table is clear */
1269 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1270                        /*
1271                         * now deposit the pgtable for archs that need it;
1272                         * otherwise free it.
1273 */
1274 if (arch_needs_pgtable_deposit()) {
1275 /*
1276                                 * The deposit should be visible only after
1277 * collapse is seen by others.
1278 */
1279 smp_wmb();
1280 pgtable_trans_huge_deposit(vma->vm_mm, pmd,
1281 pmd_pgtable(_pmd));
1282 deposited = true;
1283 }
1284                        spin_unlock(ptl);
1285                        up_write(&vma->vm_mm->mmap_sem);
1286                        if (!deposited) {
1287 atomic_long_dec(&vma->vm_mm->nr_ptes);
1288 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1289 }
1290                }
1291 }
1292 i_mmap_unlock_write(mapping);
1293}
1294
1295/**
1296 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1297 *
1298 * Basic scheme is simple, details are more complex:
1299 * - allocate and freeze a new huge page;
1300 * - scan over radix tree replacing old pages with the new one
1301 * + swap in pages if necessary;
1302 * + fill in gaps;
1303 * + keep old pages around in case if rollback is required;
1304 * - if replacing succeeds:
1305 * + copy data over;
1306 * + free old pages;
1307 * + unfreeze huge page;
1308 * - if replacing failed:
1309 * + put all pages back and unfreeze them;
1310 * + restore gaps in the radix-tree;
1311 * + free huge page;
1312 */
1313static void collapse_shmem(struct mm_struct *mm,
1314 struct address_space *mapping, pgoff_t start,
1315 struct page **hpage, int node)
1316{
1317 gfp_t gfp;
1318 struct page *page, *new_page, *tmp;
1319 struct mem_cgroup *memcg;
1320 pgoff_t index, end = start + HPAGE_PMD_NR;
1321 LIST_HEAD(pagelist);
1322 struct radix_tree_iter iter;
1323 void **slot;
1324 int nr_none = 0, result = SCAN_SUCCEED;
1325
1326 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1327
1328 /* Only allocate from the target node */
1329 gfp = alloc_hugepage_khugepaged_gfpmask() |
1330 __GFP_OTHER_NODE | __GFP_THISNODE;
1331
1332 new_page = khugepaged_alloc_page(hpage, gfp, node);
1333 if (!new_page) {
1334 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1335 goto out;
1336 }
1337
1338 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1339 result = SCAN_CGROUP_CHARGE_FAIL;
1340 goto out;
1341 }
1342
1343 new_page->index = start;
1344 new_page->mapping = mapping;
1345 __SetPageSwapBacked(new_page);
1346 __SetPageLocked(new_page);
1347 BUG_ON(!page_ref_freeze(new_page, 1));
1348
1349
1350 /*
1351 * At this point the new_page is 'frozen' (page_count() is zero), locked
1352 * and not up-to-date. It's safe to insert it into radix tree, because
1353 * nobody would be able to map it or use it in other way until we
1354 * unfreeze it.
1355 */
1356
1357 index = start;
1358 spin_lock_irq(&mapping->tree_lock);
1359 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1360 int n = min(iter.index, end) - index;
1361
1362 /*
1363 * Handle holes in the radix tree: charge it from shmem and
1364 * insert relevant subpage of new_page into the radix-tree.
1365 */
1366 if (n && !shmem_charge(mapping->host, n)) {
1367 result = SCAN_FAIL;
1368 break;
1369 }
1370 nr_none += n;
1371 for (; index < min(iter.index, end); index++) {
1372 radix_tree_insert(&mapping->page_tree, index,
1373 new_page + (index % HPAGE_PMD_NR));
1374 }
1375
1376 /* We are done. */
1377 if (index >= end)
1378 break;
1379
1380 page = radix_tree_deref_slot_protected(slot,
1381 &mapping->tree_lock);
1382 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1383 spin_unlock_irq(&mapping->tree_lock);
1384 /* swap in or instantiate fallocated page */
1385 if (shmem_getpage(mapping->host, index, &page,
1386 SGP_NOHUGE)) {
1387 result = SCAN_FAIL;
1388 goto tree_unlocked;
1389 }
1390 spin_lock_irq(&mapping->tree_lock);
1391 } else if (trylock_page(page)) {
1392 get_page(page);
1393 } else {
1394 result = SCAN_PAGE_LOCK;
1395 break;
1396 }
1397
1398 /*
1399 * The page must be locked, so we can drop the tree_lock
1400 * without racing with truncate.
1401 */
1402 VM_BUG_ON_PAGE(!PageLocked(page), page);
1403 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1404 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1405
1406 if (page_mapping(page) != mapping) {
1407 result = SCAN_TRUNCATED;
1408 goto out_unlock;
1409 }
1410 spin_unlock_irq(&mapping->tree_lock);
1411
1412 if (isolate_lru_page(page)) {
1413 result = SCAN_DEL_PAGE_LRU;
1414 goto out_isolate_failed;
1415 }
1416
1417 if (page_mapped(page))
1418 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1419 PAGE_SIZE, 0);
1420
1421 spin_lock_irq(&mapping->tree_lock);
1422
1423                slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1424                VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1425                                        &mapping->tree_lock), page);
1426                VM_BUG_ON_PAGE(page_mapped(page), page);
1427
1428 /*
1429 * The page is expected to have page_count() == 3:
1430 * - we hold a pin on it;
1431 * - one reference from radix tree;
1432 * - one from isolate_lru_page;
1433 */
1434 if (!page_ref_freeze(page, 3)) {
1435 result = SCAN_PAGE_COUNT;
1436 goto out_lru;
1437 }
1438
1439 /*
1440 * Add the page to the list to be able to undo the collapse if
1441                 * something goes wrong.
1442 */
1443 list_add_tail(&page->lru, &pagelist);
1444
1445 /* Finally, replace with the new page. */
1446                radix_tree_replace_slot(&mapping->page_tree, slot,
1447                                new_page + (index % HPAGE_PMD_NR));
1448
1449                slot = radix_tree_iter_resume(slot, &iter);
1450                index++;
1451 continue;
1452out_lru:
1453 spin_unlock_irq(&mapping->tree_lock);
1454 putback_lru_page(page);
1455out_isolate_failed:
1456 unlock_page(page);
1457 put_page(page);
1458 goto tree_unlocked;
1459out_unlock:
1460 unlock_page(page);
1461 put_page(page);
1462 break;
1463 }
1464
1465 /*
1466 * Handle hole in radix tree at the end of the range.
1467 * This code only triggers if there's nothing in radix tree
1468 * beyond 'end'.
1469 */
1470 if (result == SCAN_SUCCEED && index < end) {
1471 int n = end - index;
1472
1473 if (!shmem_charge(mapping->host, n)) {
1474 result = SCAN_FAIL;
1475 goto tree_locked;
1476 }
1477
1478 for (; index < end; index++) {
1479 radix_tree_insert(&mapping->page_tree, index,
1480 new_page + (index % HPAGE_PMD_NR));
1481 }
1482 nr_none += n;
1483 }
1484
1485tree_locked:
1486 spin_unlock_irq(&mapping->tree_lock);
1487tree_unlocked:
1488
1489 if (result == SCAN_SUCCEED) {
1490 unsigned long flags;
1491 struct zone *zone = page_zone(new_page);
1492
1493 /*
1494                 * Replacing old pages with the new one has succeeded; now we
1495                 * need to copy the content and free the old pages.
1496 */
1497 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1498 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1499 page);
1500 list_del(&page->lru);
1501 unlock_page(page);
1502 page_ref_unfreeze(page, 1);
1503 page->mapping = NULL;
1504 ClearPageActive(page);
1505 ClearPageUnevictable(page);
1506 put_page(page);
1507 }
1508
1509 local_irq_save(flags);
1510                __inc_node_page_state(new_page, NR_SHMEM_THPS);
1511                if (nr_none) {
1512                        __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1513                        __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1514                }
1515 local_irq_restore(flags);
1516
1517 /*
1518                 * Remove pte page tables, so we can re-fault
1519                 * the page as huge.
1520 */
1521 retract_page_tables(mapping, start);
1522
1523 /* Everything is ready, let's unfreeze the new_page */
1524 set_page_dirty(new_page);
1525 SetPageUptodate(new_page);
1526 page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1527 mem_cgroup_commit_charge(new_page, memcg, false, true);
1528 lru_cache_add_anon(new_page);
1529 unlock_page(new_page);
1530
1531 *hpage = NULL;
1532 } else {
1533 /* Something went wrong: rollback changes to the radix-tree */
1534 shmem_uncharge(mapping->host, nr_none);
1535 spin_lock_irq(&mapping->tree_lock);
1536 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1537 start) {
1538 if (iter.index >= end)
1539 break;
1540 page = list_first_entry_or_null(&pagelist,
1541 struct page, lru);
1542 if (!page || iter.index < page->index) {
1543 if (!nr_none)
1544 break;
1545                                nr_none--;
1546                                /* Put holes back where they were */
1547 radix_tree_delete(&mapping->page_tree,
1548 iter.index);
1549                                continue;
1550 }
1551
1552 VM_BUG_ON_PAGE(page->index != iter.index, page);
1553
1554 /* Unfreeze the page. */
1555 list_del(&page->lru);
1556 page_ref_unfreeze(page, 2);
1557                        radix_tree_replace_slot(&mapping->page_tree,
1558                                        slot, page);
1559                        slot = radix_tree_iter_resume(slot, &iter);
1560                        spin_unlock_irq(&mapping->tree_lock);
1561 putback_lru_page(page);
1562 unlock_page(page);
1563 spin_lock_irq(&mapping->tree_lock);
1564 }
1565 VM_BUG_ON(nr_none);
1566 spin_unlock_irq(&mapping->tree_lock);
1567
1568 /* Unfreeze new_page, caller would take care about freeing it */
1569 page_ref_unfreeze(new_page, 1);
1570 mem_cgroup_cancel_charge(new_page, memcg, true);
1571 unlock_page(new_page);
1572 new_page->mapping = NULL;
1573 }
1574out:
1575 VM_BUG_ON(!list_empty(&pagelist));
1576 /* TODO: tracepoints */
1577}
1578
1579static void khugepaged_scan_shmem(struct mm_struct *mm,
1580 struct address_space *mapping,
1581 pgoff_t start, struct page **hpage)
1582{
1583 struct page *page = NULL;
1584 struct radix_tree_iter iter;
1585 void **slot;
1586 int present, swap;
1587 int node = NUMA_NO_NODE;
1588 int result = SCAN_SUCCEED;
1589
1590 present = 0;
1591 swap = 0;
1592 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1593 rcu_read_lock();
1594 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1595 if (iter.index >= start + HPAGE_PMD_NR)
1596 break;
1597
1598 page = radix_tree_deref_slot(slot);
1599 if (radix_tree_deref_retry(page)) {
1600 slot = radix_tree_iter_retry(&iter);
1601 continue;
1602 }
1603
1604 if (radix_tree_exception(page)) {
1605 if (++swap > khugepaged_max_ptes_swap) {
1606 result = SCAN_EXCEED_SWAP_PTE;
1607 break;
1608 }
1609 continue;
1610 }
1611
1612 if (PageTransCompound(page)) {
1613 result = SCAN_PAGE_COMPOUND;
1614 break;
1615 }
1616
1617 node = page_to_nid(page);
1618 if (khugepaged_scan_abort(node)) {
1619 result = SCAN_SCAN_ABORT;
1620 break;
1621 }
1622 khugepaged_node_load[node]++;
1623
1624 if (!PageLRU(page)) {
1625 result = SCAN_PAGE_LRU;
1626 break;
1627 }
1628
1629 if (page_count(page) != 1 + page_mapcount(page)) {
1630 result = SCAN_PAGE_COUNT;
1631 break;
1632 }
1633
1634 /*
1635 * We probably should check if the page is referenced here, but
1636 * nobody would transfer pte_young() to PageReferenced() for us.
1637 * And rmap walk here is just too costly...
1638 */
1639
1640 present++;
1641
1642 if (need_resched()) {
1643                        slot = radix_tree_iter_resume(slot, &iter);
1644                        cond_resched_rcu();
1645                }
1646 }
1647 rcu_read_unlock();
1648
1649 if (result == SCAN_SUCCEED) {
1650 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1651 result = SCAN_EXCEED_NONE_PTE;
1652 } else {
1653 node = khugepaged_find_target_node();
1654 collapse_shmem(mm, mapping, start, hpage, node);
1655 }
1656 }
1657
1658 /* TODO: tracepoints */
1659}
1660#else
1661static void khugepaged_scan_shmem(struct mm_struct *mm,
1662 struct address_space *mapping,
1663 pgoff_t start, struct page **hpage)
1664{
1665 BUILD_BUG();
1666}
1667#endif
1668
1669static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1670 struct page **hpage)
1671 __releases(&khugepaged_mm_lock)
1672 __acquires(&khugepaged_mm_lock)
1673{
1674 struct mm_slot *mm_slot;
1675 struct mm_struct *mm;
1676 struct vm_area_struct *vma;
1677 int progress = 0;
1678
1679 VM_BUG_ON(!pages);
1680 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1681
1682 if (khugepaged_scan.mm_slot)
1683 mm_slot = khugepaged_scan.mm_slot;
1684 else {
1685 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1686 struct mm_slot, mm_node);
1687 khugepaged_scan.address = 0;
1688 khugepaged_scan.mm_slot = mm_slot;
1689 }
1690 spin_unlock(&khugepaged_mm_lock);
1691
1692 mm = mm_slot->mm;
1693 down_read(&mm->mmap_sem);
1694 if (unlikely(khugepaged_test_exit(mm)))
1695 vma = NULL;
1696 else
1697 vma = find_vma(mm, khugepaged_scan.address);
1698
1699 progress++;
1700 for (; vma; vma = vma->vm_next) {
1701 unsigned long hstart, hend;
1702
1703 cond_resched();
1704 if (unlikely(khugepaged_test_exit(mm))) {
1705 progress++;
1706 break;
1707 }
1708 if (!hugepage_vma_check(vma)) {
1709skip:
1710 progress++;
1711 continue;
1712 }
1713 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1714 hend = vma->vm_end & HPAGE_PMD_MASK;
1715 if (hstart >= hend)
1716 goto skip;
1717 if (khugepaged_scan.address > hend)
1718 goto skip;
1719 if (khugepaged_scan.address < hstart)
1720 khugepaged_scan.address = hstart;
1721 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1722
1723 while (khugepaged_scan.address < hend) {
1724 int ret;
1725 cond_resched();
1726 if (unlikely(khugepaged_test_exit(mm)))
1727 goto breakouterloop;
1728
1729 VM_BUG_ON(khugepaged_scan.address < hstart ||
1730 khugepaged_scan.address + HPAGE_PMD_SIZE >
1731 hend);
1732                        if (shmem_file(vma->vm_file)) {
1733                                struct file *file;
1734                                pgoff_t pgoff = linear_page_index(vma,
1735 khugepaged_scan.address);
1736                                if (!shmem_huge_enabled(vma))
1737 goto skip;
1738 file = get_file(vma->vm_file);
1739                                up_read(&mm->mmap_sem);
1740 ret = 1;
1741 khugepaged_scan_shmem(mm, file->f_mapping,
1742 pgoff, hpage);
1743 fput(file);
1744 } else {
1745 ret = khugepaged_scan_pmd(mm, vma,
1746 khugepaged_scan.address,
1747 hpage);
1748 }
1749                        /* move to next address */
1750 khugepaged_scan.address += HPAGE_PMD_SIZE;
1751 progress += HPAGE_PMD_NR;
1752 if (ret)
1753 /* we released mmap_sem so break loop */
1754 goto breakouterloop_mmap_sem;
1755 if (progress >= pages)
1756 goto breakouterloop;
1757 }
1758 }
1759breakouterloop:
1760 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1761breakouterloop_mmap_sem:
1762
1763 spin_lock(&khugepaged_mm_lock);
1764 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1765 /*
1766 * Release the current mm_slot if this mm is about to die, or
1767 * if we scanned all vmas of this mm.
1768 */
1769 if (khugepaged_test_exit(mm) || !vma) {
1770 /*
1771 * Make sure that if mm_users is reaching zero while
1772 * khugepaged runs here, khugepaged_exit will find
1773 * mm_slot not pointing to the exiting mm.
1774 */
1775 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1776 khugepaged_scan.mm_slot = list_entry(
1777 mm_slot->mm_node.next,
1778 struct mm_slot, mm_node);
1779 khugepaged_scan.address = 0;
1780 } else {
1781 khugepaged_scan.mm_slot = NULL;
1782 khugepaged_full_scans++;
1783 }
1784
1785 collect_mm_slot(mm_slot);
1786 }
1787
1788 return progress;
1789}
1790
1791static int khugepaged_has_work(void)
1792{
1793 return !list_empty(&khugepaged_scan.mm_head) &&
1794 khugepaged_enabled();
1795}
1796
1797static int khugepaged_wait_event(void)
1798{
1799 return !list_empty(&khugepaged_scan.mm_head) ||
1800 kthread_should_stop();
1801}
1802
1803static void khugepaged_do_scan(void)
1804{
1805 struct page *hpage = NULL;
1806 unsigned int progress = 0, pass_through_head = 0;
1807 unsigned int pages = khugepaged_pages_to_scan;
1808 bool wait = true;
1809
1810 barrier(); /* write khugepaged_pages_to_scan to local stack */
1811
1812 while (progress < pages) {
1813 if (!khugepaged_prealloc_page(&hpage, &wait))
1814 break;
1815
1816 cond_resched();
1817
1818 if (unlikely(kthread_should_stop() || try_to_freeze()))
1819 break;
1820
1821 spin_lock(&khugepaged_mm_lock);
1822 if (!khugepaged_scan.mm_slot)
1823 pass_through_head++;
1824 if (khugepaged_has_work() &&
1825 pass_through_head < 2)
1826 progress += khugepaged_scan_mm_slot(pages - progress,
1827 &hpage);
1828 else
1829 progress = pages;
1830 spin_unlock(&khugepaged_mm_lock);
1831 }
1832
1833 if (!IS_ERR_OR_NULL(hpage))
1834 put_page(hpage);
1835}
1836
1837static bool khugepaged_should_wakeup(void)
1838{
1839 return kthread_should_stop() ||
1840 time_after_eq(jiffies, khugepaged_sleep_expire);
1841}
1842
1843static void khugepaged_wait_work(void)
1844{
1845 if (khugepaged_has_work()) {
1846 const unsigned long scan_sleep_jiffies =
1847 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1848
1849 if (!scan_sleep_jiffies)
1850 return;
1851
1852 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1853 wait_event_freezable_timeout(khugepaged_wait,
1854 khugepaged_should_wakeup(),
1855 scan_sleep_jiffies);
1856 return;
1857 }
1858
1859 if (khugepaged_enabled())
1860 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1861}
1862
1863static int khugepaged(void *none)
1864{
1865 struct mm_slot *mm_slot;
1866
1867 set_freezable();
1868 set_user_nice(current, MAX_NICE);
1869
1870 while (!kthread_should_stop()) {
1871 khugepaged_do_scan();
1872 khugepaged_wait_work();
1873 }
1874
1875 spin_lock(&khugepaged_mm_lock);
1876 mm_slot = khugepaged_scan.mm_slot;
1877 khugepaged_scan.mm_slot = NULL;
1878 if (mm_slot)
1879 collect_mm_slot(mm_slot);
1880 spin_unlock(&khugepaged_mm_lock);
1881 return 0;
1882}
1883
1884static void set_recommended_min_free_kbytes(void)
1885{
1886 struct zone *zone;
1887 int nr_zones = 0;
1888 unsigned long recommended_min;
1889
1890 for_each_populated_zone(zone)
1891 nr_zones++;
1892
1893 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1894 recommended_min = pageblock_nr_pages * nr_zones * 2;
1895
1896 /*
1897 * Make sure that on average at least two pageblocks are almost free
1898 * of another type, one for a migratetype to fall back to and a
1899         * second to avoid subsequent fallbacks of other types. There are 3
1900 * MIGRATE_TYPES we care about.
1901 */
1902 recommended_min += pageblock_nr_pages * nr_zones *
1903 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
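        /*
         * Worked example with assumed values (4 KiB pages, 2 MiB pageblocks,
         * so pageblock_nr_pages == 512, and three populated zones): the
         * first term reserves 512 * 3 * 2 = 3072 pages and this one adds
         * 512 * 3 * 3 * 3 = 13824 pages, i.e. roughly 66 MiB before the
         * 5%-of-lowmem clamp below.
         */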
1904
1905 /* don't ever allow to reserve more than 5% of the lowmem */
1906 recommended_min = min(recommended_min,
1907 (unsigned long) nr_free_buffer_pages() / 20);
1908 recommended_min <<= (PAGE_SHIFT-10);
1909
1910 if (recommended_min > min_free_kbytes) {
1911 if (user_min_free_kbytes >= 0)
1912 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1913 min_free_kbytes, recommended_min);
1914
1915 min_free_kbytes = recommended_min;
1916 }
1917 setup_per_zone_wmarks();
1918}
1919
1920int start_stop_khugepaged(void)
1921{
1922 static struct task_struct *khugepaged_thread __read_mostly;
1923 static DEFINE_MUTEX(khugepaged_mutex);
1924 int err = 0;
1925
1926 mutex_lock(&khugepaged_mutex);
1927 if (khugepaged_enabled()) {
1928 if (!khugepaged_thread)
1929 khugepaged_thread = kthread_run(khugepaged, NULL,
1930 "khugepaged");
1931 if (IS_ERR(khugepaged_thread)) {
1932 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1933 err = PTR_ERR(khugepaged_thread);
1934 khugepaged_thread = NULL;
1935 goto fail;
1936 }
1937
1938 if (!list_empty(&khugepaged_scan.mm_head))
1939 wake_up_interruptible(&khugepaged_wait);
1940
1941 set_recommended_min_free_kbytes();
1942 } else if (khugepaged_thread) {
1943 kthread_stop(khugepaged_thread);
1944 khugepaged_thread = NULL;
1945 }
1946fail:
1947 mutex_unlock(&khugepaged_mutex);
1948 return err;
1949}