/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

/* by default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


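/*
 * Raise min_free_kbytes so that enough whole pageblocks stay free for
 * hugepage allocations to succeed without excessive reclaim.
 */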
static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) &&
	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags))
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

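/*
 * Start the khugepaged kernel thread when transparent hugepages are
 * enabled, or wake the existing thread so it can exit when they are
 * disabled.
 */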
static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		int wakeup;
		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
			err = -ENOMEM;
			goto out;
		}
		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
		wakeup = !list_empty(&khugepaged_scan.mm_head);
		mutex_unlock(&khugepaged_mutex);
		if (wakeup)
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else
		/* wakeup to exit */
		wake_up_interruptible(&khugepaged_wait);
out:
	return err;
}

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_khugepaged();
		if (err)
			ret = err;
	}

	if (ret > 0 &&
	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) ||
	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags)))
		set_recommended_min_free_kbytes();

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed kobject create\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	set_recommended_min_free_kbytes();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

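/*
 * Deposit a preallocated pte page table with the mm, so it can be
 * withdrawn later, e.g. when the huge pmd has to be split back into
 * regular ptes.
 */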
static void prepare_pmd_huge_pte(pgtable_t pgtable,
				 struct mm_struct *mm)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}

static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

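/*
 * Map a freshly allocated, already memcg-charged huge page at @haddr:
 * clear it, set up its rmap and install the huge pmd under
 * mm->page_table_lock, depositing a pte page table for a later split.
 */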
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	int ret = 0;
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible after the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		prepare_pmd_huge_pte(pgtable, mm);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		spin_unlock(&mm->page_table_lock);
	}

	return ret;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
#endif

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr, numa_node_id(), 0);
		if (unlikely(!page)) {
			count_vm_event(THP_FAULT_FALLBACK);
			goto out;
		}
		count_vm_event(THP_FAULT_ALLOC);
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}

		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

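/*
 * Copy a huge pmd from @src_mm to @dst_mm at fork time. The source pmd
 * is write protected so both mappings will COW, and a pte page table is
 * deposited in the destination for a possible later split.
 */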
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	prepare_pmd_huge_pte(pgtable, dst_mm);

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}

/* no "address" argument so destroys page coloring of some arch */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}

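/*
 * Fallback COW path for a write fault on a huge pmd when a new huge
 * page cannot be allocated: copy the data into HPAGE_PMD_NR small
 * pages and remap the range with regular ptes.
 */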
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = get_pmd_huge_pte(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	mm->nr_ptes++;
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}

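/*
 * Handle a write fault on a huge pmd: reuse the page when it is mapped
 * only once, otherwise COW into a newly allocated huge page, falling
 * back to small pages if the allocation fails.
 */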
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr, numa_node_id(), 0);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		put_page(page);
		goto out;
	}
	count_vm_event(THP_FAULT_ALLOC);

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache(vma, address, entry);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}

struct page *follow_trans_huge_pmd(struct mm_struct *mm,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

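/*
 * Unmap a huge pmd: wait for a concurrent split to finish, otherwise
 * clear the pmd, drop the page's rmap and reference and free the
 * deposited pte page table.
 */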
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd)
{
	int ret = 0;

	spin_lock(&tlb->mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&tlb->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma,
					     pmd);
		} else {
			struct page *page;
			pgtable_t pgtable;
			pgtable = get_pmd_huge_pte(tlb->mm);
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			page_remove_rmap(page);
			VM_BUG_ON(page_mapcount(page) < 0);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON(!PageHead(page));
			spin_unlock(&tlb->mm->page_table_lock);
			tlb_remove_page(tlb, page);
			pte_free(tlb->mm, pgtable);
			ret = 1;
		}
	} else
		spin_unlock(&tlb->mm->page_table_lock);

	return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		ret = !pmd_trans_splitting(*pmd);
		spin_unlock(&vma->vm_mm->page_table_lock);
		if (unlikely(!ret))
			wait_split_huge_page(vma->anon_vma, pmd);
		else {
			/*
			 * All logical pages in the range are present
			 * if backed by a huge page.
			 */
			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*old_pmd))) {
		if (pmd_trans_splitting(*old_pmd)) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, old_pmd);
			ret = -1;
		} else {
			pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
			VM_BUG_ON(!pmd_none(*new_pmd));
			set_pmd_at(mm, new_addr, new_pmd, pmd);
			spin_unlock(&mm->page_table_lock);
			ret = 1;
		}
	} else {
		spin_unlock(&mm->page_table_lock);
	}
out:
	return ret;
}

int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
			ret = 1;
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto out;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}

static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->mutex to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush_notify(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

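/*
 * Turn a compound huge page into HPAGE_PMD_NR independent pages:
 * distribute the head page's refcounts, mapcount and flags to the tail
 * pages and clear the compound state, with the lru_lock held to freeze
 * the LRU statistics.
 */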
static void __split_huge_page_refcount(struct page *page)
{
	int i;
	unsigned long head_index = page->index;
	struct zone *zone = page_zone(page);
	int zonestat;
	int tail_count = 0;

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	compound_lock(page);
	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometime uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain hwpoison flag of the poisoned tail page:
		 * fix for the unsuitable process killed on Guest Machine(KVM)
		 * by the memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the split the mapcount
		 * can't change. But that doesn't mean userland can't
		 * keep changing and reading the page contents while
		 * we transfer the mapcount, so the pmd splitting
		 * status is achieved setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = ++head_index;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));


		lru_add_page_tail(zone, page, page_tail);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	/*
	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
	 * so adjust those appropriately if this page is on the LRU.
	 */
	if (PageLRU(page)) {
		zonestat = NR_LRU_BASE + page_lru(page);
		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
	}

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now become a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

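/*
 * Rebuild the mapping for a huge page being split: fill the deposited
 * page table with ptes for each subpage and replace the huge pmd with
 * it, flushing the TLB in between to avoid mixing huge and small TLB
 * entries for the same virtual address.
 */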
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = get_pmd_huge_pte(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
		     i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		mm->nr_ptes++;
		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPU doesn't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but also
		 * warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLB is identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->mutex held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

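/*
 * Split a huge page into normal pages. Returns 0 on success (also if
 * the page was already split), non zero if the page's anon_vma could
 * not be locked.
 */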
int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}

#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
		   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

#if 0
static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
#endif

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

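/*
 * Register @mm with khugepaged: allocate an mm_slot, hash it and queue
 * it behind the scan cursor, waking khugepaged if the scan list was
 * empty.
 */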
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001666 /*
1667 * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1668 * true too, verify it here.
1669 */
1670 VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001671 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1672 hend = vma->vm_end & HPAGE_PMD_MASK;
1673 if (hstart < hend)
1674 return khugepaged_enter(vma);
1675 return 0;
1676}
1677
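/*
 * Unregister @mm when it is torn down. The slot can be freed immediately
 * unless khugepaged's cursor currently points at it; in that case only the
 * MMF_VM_HUGEPAGE bit is left behind and khugepaged_scan_mm_slot() releases
 * the slot itself, while the down_write/up_write pair below waits for any
 * scan still running under mmap_sem before the pagetables are destroyed.
 */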
1678void __khugepaged_exit(struct mm_struct *mm)
1679{
1680 struct mm_slot *mm_slot;
1681 int free = 0;
1682
1683 spin_lock(&khugepaged_mm_lock);
1684 mm_slot = get_mm_slot(mm);
1685 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1686 hlist_del(&mm_slot->hash);
1687 list_del(&mm_slot->mm_node);
1688 free = 1;
1689 }
Chris Wrightd788e802011-07-25 17:12:14 -07001690 spin_unlock(&khugepaged_mm_lock);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001691
1692 if (free) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08001693 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1694 free_mm_slot(mm_slot);
1695 mmdrop(mm);
1696 } else if (mm_slot) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08001697 /*
1698 * This is required to serialize against
1699 * khugepaged_test_exit() (which is guaranteed to run
1700 * under mmap_sem read mode). Stop here (once we
1701 * return, all pagetables will be destroyed) until
1702 * khugepaged has finished working on the pagetables
1703 * under the mmap_sem.
1704 */
1705 down_write(&mm->mmap_sem);
1706 up_write(&mm->mmap_sem);
Chris Wrightd788e802011-07-25 17:12:14 -07001707 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08001708}
1709
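/*
 * Release one page that was isolated by __collapse_huge_page_isolate():
 * drop the NR_ISOLATED_ANON accounting, unlock the page and put it back
 * on the LRU.
 */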
1710static void release_pte_page(struct page *page)
1711{
1712 /* 0 stands for page_is_file_cache(page) == false */
1713 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1714 unlock_page(page);
1715 putback_lru_page(page);
1716}
1717
1718static void release_pte_pages(pte_t *pte, pte_t *_pte)
1719{
1720 while (--_pte >= pte) {
1721 pte_t pteval = *_pte;
1722 if (!pte_none(pteval))
1723 release_pte_page(pte_page(pteval));
1724 }
1725}
1726
1727static void release_all_pte_pages(pte_t *pte)
1728{
1729 release_pte_pages(pte, pte + HPAGE_PMD_NR);
1730}
1731
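/*
 * Walk the HPAGE_PMD_NR ptes starting at @pte and try to lock and
 * LRU-isolate every mapped page. Gives up (putting back whatever was
 * already isolated) on non-present or read-only ptes, on pages with extra
 * references (gup pins), or when more than khugepaged_max_ptes_none entries
 * are pte_none. Returns 1 only if everything was isolated and at least one
 * pte or page was recently referenced.
 */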
1732static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1733 unsigned long address,
1734 pte_t *pte)
1735{
1736 struct page *page;
1737 pte_t *_pte;
1738 int referenced = 0, isolated = 0, none = 0;
1739 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1740 _pte++, address += PAGE_SIZE) {
1741 pte_t pteval = *_pte;
1742 if (pte_none(pteval)) {
1743 if (++none <= khugepaged_max_ptes_none)
1744 continue;
1745 else {
1746 release_pte_pages(pte, _pte);
1747 goto out;
1748 }
1749 }
1750 if (!pte_present(pteval) || !pte_write(pteval)) {
1751 release_pte_pages(pte, _pte);
1752 goto out;
1753 }
1754 page = vm_normal_page(vma, address, pteval);
1755 if (unlikely(!page)) {
1756 release_pte_pages(pte, _pte);
1757 goto out;
1758 }
1759 VM_BUG_ON(PageCompound(page));
1760 BUG_ON(!PageAnon(page));
1761 VM_BUG_ON(!PageSwapBacked(page));
1762
1763 /* cannot use mapcount: can't collapse if there's a gup pin */
1764 if (page_count(page) != 1) {
1765 release_pte_pages(pte, _pte);
1766 goto out;
1767 }
1768 /*
1769 * We can do it before isolate_lru_page because the
1770 * page can't be freed from under us. NOTE: PG_lock
1771 * is needed to serialize against split_huge_page
1772 * when invoked from the VM.
1773 */
1774 if (!trylock_page(page)) {
1775 release_pte_pages(pte, _pte);
1776 goto out;
1777 }
1778 /*
1779 * Isolate the page to avoid collapsing an hugepage
1780 * currently in use by the VM.
1781 */
1782 if (isolate_lru_page(page)) {
1783 unlock_page(page);
1784 release_pte_pages(pte, _pte);
1785 goto out;
1786 }
1787 /* 0 stands for page_is_file_cache(page) == false */
1788 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1789 VM_BUG_ON(!PageLocked(page));
1790 VM_BUG_ON(PageLRU(page));
1791
1792 /* If no mapped pte is young, don't collapse the page */
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08001793 if (pte_young(pteval) || PageReferenced(page) ||
1794 mmu_notifier_test_young(vma->vm_mm, address))
Andrea Arcangeliba761492011-01-13 15:46:58 -08001795 referenced = 1;
1796 }
1797 if (unlikely(!referenced))
1798 release_all_pte_pages(pte);
1799 else
1800 isolated = 1;
1801out:
1802 return isolated;
1803}
1804
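/*
 * Copy the contents of the isolated small pages into the new hugepage.
 * pte_none entries become zero-filled subpages (and bump MM_ANONPAGES so
 * the rss accounting matches the fully mapped hugepage); mapped entries
 * are copied, their ptes cleared, the rmap removed and the old pages
 * released via free_page_and_swap_cache().
 */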
1805static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1806 struct vm_area_struct *vma,
1807 unsigned long address,
1808 spinlock_t *ptl)
1809{
1810 pte_t *_pte;
1811 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1812 pte_t pteval = *_pte;
1813 struct page *src_page;
1814
1815 if (pte_none(pteval)) {
1816 clear_user_highpage(page, address);
1817 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1818 } else {
1819 src_page = pte_page(pteval);
1820 copy_user_highpage(page, src_page, address, vma);
1821 VM_BUG_ON(page_mapcount(src_page) != 1);
1822 VM_BUG_ON(page_count(src_page) != 2);
1823 release_pte_page(src_page);
1824 /*
1825 * ptl mostly unnecessary, but preempt has to
1826 * be disabled to update the per-cpu stats
1827 * inside page_remove_rmap().
1828 */
1829 spin_lock(ptl);
1830 /*
1831 * paravirt calls inside pte_clear here are
1832 * superfluous.
1833 */
1834 pte_clear(vma->vm_mm, address, _pte);
1835 page_remove_rmap(src_page);
1836 spin_unlock(ptl);
1837 free_page_and_swap_cache(src_page);
1838 }
1839
1840 address += PAGE_SIZE;
1841 page++;
1842 }
1843}
1844
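/*
 * Collapse the pmd-aligned range at @address into one hugepage. On !NUMA
 * the preallocated *hpage is consumed; on NUMA the page is allocated here
 * on @node. The function enters with mmap_sem held for read and returns
 * with it released: the read lock is dropped, mmap_sem is re-taken for
 * write while the pmd is cleared, the ptes are isolated and copied, and
 * the huge pmd is installed, then the write lock is released.
 */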
1845static void collapse_huge_page(struct mm_struct *mm,
1846 unsigned long address,
Andrea Arcangelice83d212011-01-13 15:47:06 -08001847 struct page **hpage,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08001848 struct vm_area_struct *vma,
1849 int node)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001850{
Andrea Arcangeliba761492011-01-13 15:46:58 -08001851 pgd_t *pgd;
1852 pud_t *pud;
1853 pmd_t *pmd, _pmd;
1854 pte_t *pte;
1855 pgtable_t pgtable;
1856 struct page *new_page;
1857 spinlock_t *ptl;
1858 int isolated;
1859 unsigned long hstart, hend;
1860
1861 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001862#ifndef CONFIG_NUMA
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07001863 up_read(&mm->mmap_sem);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001864 VM_BUG_ON(!*hpage);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001865 new_page = *hpage;
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001866#else
1867 VM_BUG_ON(*hpage);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001868 /*
1869 * Allocate the page while the vma is still valid and under
1870 * the mmap_sem read mode so there is no memory allocation
1871 * later when we take the mmap_sem in write mode. This is
1872 * friendlier behavior (OTOH it may actually hide bugs) towards
1873 * userland filesystems whose daemons allocate memory in the
1874 * userland I/O paths. Allocating memory with the mmap_sem
1875 * held in read mode is also a good idea to allow greater
1876 * scalability.
1877 */
Andi Kleen5c4b4be2011-03-04 17:36:32 -08001878 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
Andi Kleencc5d4622011-03-22 16:33:13 -07001879 node, __GFP_OTHER_NODE);
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07001880
1881 /*
1882 * After allocating the hugepage, release the mmap_sem read lock in
1883 * preparation for taking it in write mode.
1884 */
1885 up_read(&mm->mmap_sem);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001886 if (unlikely(!new_page)) {
Andi Kleen81ab4202011-04-14 15:22:06 -07001887 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001888 *hpage = ERR_PTR(-ENOMEM);
1889 return;
1890 }
Hugh Dickins2fbfac42011-03-14 01:08:47 -07001891#endif
Andrea Arcangelice83d212011-01-13 15:47:06 -08001892
Andrea Arcangeli692e0b32011-05-24 17:12:14 -07001893 count_vm_event(THP_COLLAPSE_ALLOC);
1894 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1895#ifdef CONFIG_NUMA
1896 put_page(new_page);
1897#endif
1898 return;
1899 }
Andrea Arcangeliba761492011-01-13 15:46:58 -08001900
1901 /*
1902 * Prevent all access to pagetables with the exception of
1903 * gup_fast later handled by the ptep_clear_flush and the VM
1904 * handled by the anon_vma lock + PG_lock.
1905 */
1906 down_write(&mm->mmap_sem);
1907 if (unlikely(khugepaged_test_exit(mm)))
1908 goto out;
1909
1910 vma = find_vma(mm, address);
1911 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1912 hend = vma->vm_end & HPAGE_PMD_MASK;
1913 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1914 goto out;
1915
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08001916 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1917 (vma->vm_flags & VM_NOHUGEPAGE))
Andrea Arcangeliba761492011-01-13 15:46:58 -08001918 goto out;
1919
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001920 if (!vma->anon_vma || vma->vm_ops)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001921 goto out;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01001922 if (is_vma_temporary_stack(vma))
1923 goto out;
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07001924 /*
1925 * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1926 * true too, verify it here.
1927 */
1928 VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001929
1930 pgd = pgd_offset(mm, address);
1931 if (!pgd_present(*pgd))
1932 goto out;
1933
1934 pud = pud_offset(pgd, address);
1935 if (!pud_present(*pud))
1936 goto out;
1937
1938 pmd = pmd_offset(pud, address);
1939 /* pmd can't go away or become huge under us */
1940 if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1941 goto out;
1942
Andrea Arcangeliba761492011-01-13 15:46:58 -08001943 anon_vma_lock(vma->anon_vma);
1944
1945 pte = pte_offset_map(pmd, address);
1946 ptl = pte_lockptr(mm, pmd);
1947
1948 spin_lock(&mm->page_table_lock); /* probably unnecessary */
1949 /*
1950 * After this gup_fast can't run anymore. This also removes
1951 * any huge TLB entry from the CPU so we won't allow
1952 * huge and small TLB entries for the same virtual address
1953 * to avoid the risk of CPU bugs in that area.
1954 */
1955 _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1956 spin_unlock(&mm->page_table_lock);
1957
1958 spin_lock(ptl);
1959 isolated = __collapse_huge_page_isolate(vma, address, pte);
1960 spin_unlock(ptl);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001961
1962 if (unlikely(!isolated)) {
Johannes Weiner453c7192011-01-20 14:44:18 -08001963 pte_unmap(pte);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001964 spin_lock(&mm->page_table_lock);
1965 BUG_ON(!pmd_none(*pmd));
1966 set_pmd_at(mm, address, pmd, _pmd);
1967 spin_unlock(&mm->page_table_lock);
1968 anon_vma_unlock(vma->anon_vma);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001969 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001970 }
1971
1972 /*
1973 * All pages are isolated and locked so anon_vma rmap
1974 * can't run anymore.
1975 */
1976 anon_vma_unlock(vma->anon_vma);
1977
1978 __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
Johannes Weiner453c7192011-01-20 14:44:18 -08001979 pte_unmap(pte);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001980 __SetPageUptodate(new_page);
1981 pgtable = pmd_pgtable(_pmd);
1982 VM_BUG_ON(page_count(pgtable) != 1);
1983 VM_BUG_ON(page_mapcount(pgtable) != 0);
1984
1985 _pmd = mk_pmd(new_page, vma->vm_page_prot);
1986 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1987 _pmd = pmd_mkhuge(_pmd);
1988
1989 /*
1990 * spin_lock() below is not the equivalent of smp_wmb(), so
1991 * this is needed to prevent the copy_huge_page writes from becoming
1992 * visible after the set_pmd_at() write.
1993 */
1994 smp_wmb();
1995
1996 spin_lock(&mm->page_table_lock);
1997 BUG_ON(!pmd_none(*pmd));
1998 page_add_new_anon_rmap(new_page, vma, address);
1999 set_pmd_at(mm, address, pmd, _pmd);
Hillf Danton35d8c7a2011-10-31 17:09:40 -07002000 update_mmu_cache(vma, address, _pmd);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002001 prepare_pmd_huge_pte(pgtable, mm);
2002 mm->nr_ptes--;
2003 spin_unlock(&mm->page_table_lock);
2004
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002005#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08002006 *hpage = NULL;
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002007#endif
Andrea Arcangeliba761492011-01-13 15:46:58 -08002008 khugepaged_pages_collapsed++;
Andrea Arcangelice83d212011-01-13 15:47:06 -08002009out_up_write:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002010 up_write(&mm->mmap_sem);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002011 return;
2012
Andrea Arcangelice83d212011-01-13 15:47:06 -08002013out:
KAMEZAWA Hiroyuki678ff892011-02-10 15:01:36 -08002014 mem_cgroup_uncharge_page(new_page);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002015#ifdef CONFIG_NUMA
2016 put_page(new_page);
2017#endif
Andrea Arcangelice83d212011-01-13 15:47:06 -08002018 goto out_up_write;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002019}
2020
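/*
 * Scan one pmd-aligned range and decide whether it is worth collapsing:
 * essentially the same pte checks as __collapse_huge_page_isolate(), but
 * without locking or isolating anything yet. Returns 1 after calling
 * collapse_huge_page() (which releases mmap_sem), 0 otherwise.
 */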
2021static int khugepaged_scan_pmd(struct mm_struct *mm,
2022 struct vm_area_struct *vma,
2023 unsigned long address,
2024 struct page **hpage)
2025{
2026 pgd_t *pgd;
2027 pud_t *pud;
2028 pmd_t *pmd;
2029 pte_t *pte, *_pte;
2030 int ret = 0, referenced = 0, none = 0;
2031 struct page *page;
2032 unsigned long _address;
2033 spinlock_t *ptl;
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002034 int node = -1;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002035
2036 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2037
2038 pgd = pgd_offset(mm, address);
2039 if (!pgd_present(*pgd))
2040 goto out;
2041
2042 pud = pud_offset(pgd, address);
2043 if (!pud_present(*pud))
2044 goto out;
2045
2046 pmd = pmd_offset(pud, address);
2047 if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2048 goto out;
2049
2050 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2051 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2052 _pte++, _address += PAGE_SIZE) {
2053 pte_t pteval = *_pte;
2054 if (pte_none(pteval)) {
2055 if (++none <= khugepaged_max_ptes_none)
2056 continue;
2057 else
2058 goto out_unmap;
2059 }
2060 if (!pte_present(pteval) || !pte_write(pteval))
2061 goto out_unmap;
2062 page = vm_normal_page(vma, _address, pteval);
2063 if (unlikely(!page))
2064 goto out_unmap;
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002065 /*
2066 * Choose the node of the first page. This could
2067 * be more sophisticated and look at more pages,
2068 * but isn't for now.
2069 */
2070 if (node == -1)
2071 node = page_to_nid(page);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002072 VM_BUG_ON(PageCompound(page));
2073 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2074 goto out_unmap;
2075 /* cannot use mapcount: can't collapse if there's a gup pin */
2076 if (page_count(page) != 1)
2077 goto out_unmap;
Andrea Arcangeli8ee53822011-01-13 15:47:10 -08002078 if (pte_young(pteval) || PageReferenced(page) ||
2079 mmu_notifier_test_young(vma->vm_mm, address))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002080 referenced = 1;
2081 }
2082 if (referenced)
2083 ret = 1;
2084out_unmap:
2085 pte_unmap_unlock(pte, ptl);
Andrea Arcangelice83d212011-01-13 15:47:06 -08002086 if (ret)
2087 /* collapse_huge_page will return with the mmap_sem released */
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002088 collapse_huge_page(mm, address, hpage, vma, node);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002089out:
2090 return ret;
2091}
2092
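/*
 * Called with khugepaged_mm_lock held: if the mm has exited, drop its slot
 * from the hash and the scan list, free it and mmdrop() the reference taken
 * in __khugepaged_enter().
 */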
2093static void collect_mm_slot(struct mm_slot *mm_slot)
2094{
2095 struct mm_struct *mm = mm_slot->mm;
2096
2097 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2098
2099 if (khugepaged_test_exit(mm)) {
2100 /* free mm_slot */
2101 hlist_del(&mm_slot->hash);
2102 list_del(&mm_slot->mm_node);
2103
2104 /*
2105 * Not strictly needed because the mm exited already.
2106 *
2107 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2108 */
2109
2110 /* khugepaged_mm_lock actually not necessary for the below */
2111 free_mm_slot(mm_slot);
2112 mmdrop(mm);
2113 }
2114}
2115
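/*
 * Scan up to @pages ptes across the registered mms, resuming from the
 * (mm_slot, address) cursor stored in khugepaged_scan and advancing it as
 * it goes. Returns the (approximate) number of ptes scanned; the caller
 * loops until its quota is consumed.
 */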
2116static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2117 struct page **hpage)
H Hartley Sweeten2f1da642011-10-31 17:09:25 -07002118 __releases(&khugepaged_mm_lock)
2119 __acquires(&khugepaged_mm_lock)
Andrea Arcangeliba761492011-01-13 15:46:58 -08002120{
2121 struct mm_slot *mm_slot;
2122 struct mm_struct *mm;
2123 struct vm_area_struct *vma;
2124 int progress = 0;
2125
2126 VM_BUG_ON(!pages);
2127 VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2128
2129 if (khugepaged_scan.mm_slot)
2130 mm_slot = khugepaged_scan.mm_slot;
2131 else {
2132 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2133 struct mm_slot, mm_node);
2134 khugepaged_scan.address = 0;
2135 khugepaged_scan.mm_slot = mm_slot;
2136 }
2137 spin_unlock(&khugepaged_mm_lock);
2138
2139 mm = mm_slot->mm;
2140 down_read(&mm->mmap_sem);
2141 if (unlikely(khugepaged_test_exit(mm)))
2142 vma = NULL;
2143 else
2144 vma = find_vma(mm, khugepaged_scan.address);
2145
2146 progress++;
2147 for (; vma; vma = vma->vm_next) {
2148 unsigned long hstart, hend;
2149
2150 cond_resched();
2151 if (unlikely(khugepaged_test_exit(mm))) {
2152 progress++;
2153 break;
2154 }
2155
Andrea Arcangeli60ab3242011-01-13 15:47:18 -08002156 if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2157 !khugepaged_always()) ||
2158 (vma->vm_flags & VM_NOHUGEPAGE)) {
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002159 skip:
Andrea Arcangeliba761492011-01-13 15:46:58 -08002160 progress++;
2161 continue;
2162 }
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07002163 if (!vma->anon_vma || vma->vm_ops)
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002164 goto skip;
2165 if (is_vma_temporary_stack(vma))
2166 goto skip;
Andrea Arcangeli78f11a22011-04-27 15:26:45 -07002167 /*
2168 * If is_pfn_mapping() is true, is_linear_pfn_mapping()
2169 * must be true too, verify it here.
2170 */
2171 VM_BUG_ON(is_linear_pfn_mapping(vma) ||
2172 vma->vm_flags & VM_NO_THP);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002173
2174 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2175 hend = vma->vm_end & HPAGE_PMD_MASK;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002176 if (hstart >= hend)
2177 goto skip;
2178 if (khugepaged_scan.address > hend)
2179 goto skip;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002180 if (khugepaged_scan.address < hstart)
2181 khugepaged_scan.address = hstart;
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002182 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002183
2184 while (khugepaged_scan.address < hend) {
2185 int ret;
2186 cond_resched();
2187 if (unlikely(khugepaged_test_exit(mm)))
2188 goto breakouterloop;
2189
2190 VM_BUG_ON(khugepaged_scan.address < hstart ||
2191 khugepaged_scan.address + HPAGE_PMD_SIZE >
2192 hend);
2193 ret = khugepaged_scan_pmd(mm, vma,
2194 khugepaged_scan.address,
2195 hpage);
2196 /* move to next address */
2197 khugepaged_scan.address += HPAGE_PMD_SIZE;
2198 progress += HPAGE_PMD_NR;
2199 if (ret)
2200 /* we released mmap_sem so break loop */
2201 goto breakouterloop_mmap_sem;
2202 if (progress >= pages)
2203 goto breakouterloop;
2204 }
2205 }
2206breakouterloop:
2207 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2208breakouterloop_mmap_sem:
2209
2210 spin_lock(&khugepaged_mm_lock);
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002211 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002212 /*
2213 * Release the current mm_slot if this mm is about to die, or
2214 * if we scanned all vmas of this mm.
2215 */
2216 if (khugepaged_test_exit(mm) || !vma) {
2217 /*
2218 * Make sure that if mm_users is reaching zero while
2219 * khugepaged runs here, khugepaged_exit will find
2220 * mm_slot not pointing to the exiting mm.
2221 */
2222 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2223 khugepaged_scan.mm_slot = list_entry(
2224 mm_slot->mm_node.next,
2225 struct mm_slot, mm_node);
2226 khugepaged_scan.address = 0;
2227 } else {
2228 khugepaged_scan.mm_slot = NULL;
2229 khugepaged_full_scans++;
2230 }
2231
2232 collect_mm_slot(mm_slot);
2233 }
2234
2235 return progress;
2236}
2237
2238static int khugepaged_has_work(void)
2239{
2240 return !list_empty(&khugepaged_scan.mm_head) &&
2241 khugepaged_enabled();
2242}
2243
2244static int khugepaged_wait_event(void)
2245{
2246 return !list_empty(&khugepaged_scan.mm_head) ||
2247 !khugepaged_enabled();
2248}
2249
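/*
 * One scan pass of up to khugepaged_pages_to_scan ptes. On !NUMA a
 * hugepage is allocated up front whenever the previous one has been
 * consumed; on NUMA *hpage only carries an ERR_PTR from a failed
 * NUMA-local allocation, in which case the pass is cut short so the
 * caller can back off.
 */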
2250static void khugepaged_do_scan(struct page **hpage)
2251{
2252 unsigned int progress = 0, pass_through_head = 0;
2253 unsigned int pages = khugepaged_pages_to_scan;
2254
2255 barrier(); /* write khugepaged_pages_to_scan to local stack */
2256
2257 while (progress < pages) {
2258 cond_resched();
2259
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002260#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08002261 if (!*hpage) {
2262 *hpage = alloc_hugepage(khugepaged_defrag());
Andi Kleen81ab4202011-04-14 15:22:06 -07002263 if (unlikely(!*hpage)) {
2264 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002265 break;
Andi Kleen81ab4202011-04-14 15:22:06 -07002266 }
2267 count_vm_event(THP_COLLAPSE_ALLOC);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002268 }
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002269#else
2270 if (IS_ERR(*hpage))
2271 break;
2272#endif
Andrea Arcangeliba761492011-01-13 15:46:58 -08002273
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002274 if (unlikely(kthread_should_stop() || freezing(current)))
2275 break;
2276
Andrea Arcangeliba761492011-01-13 15:46:58 -08002277 spin_lock(&khugepaged_mm_lock);
2278 if (!khugepaged_scan.mm_slot)
2279 pass_through_head++;
2280 if (khugepaged_has_work() &&
2281 pass_through_head < 2)
2282 progress += khugepaged_scan_mm_slot(pages - progress,
2283 hpage);
2284 else
2285 progress = pages;
2286 spin_unlock(&khugepaged_mm_lock);
2287 }
2288}
2289
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002290static void khugepaged_alloc_sleep(void)
2291{
Andrea Arcangeli1dfb0592011-12-08 14:33:57 -08002292 wait_event_freezable_timeout(khugepaged_wait, false,
2293 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002294}
2295
2296#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08002297static struct page *khugepaged_alloc_hugepage(void)
2298{
2299 struct page *hpage;
2300
2301 do {
2302 hpage = alloc_hugepage(khugepaged_defrag());
Andi Kleen81ab4202011-04-14 15:22:06 -07002303 if (!hpage) {
2304 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002305 khugepaged_alloc_sleep();
Andi Kleen81ab4202011-04-14 15:22:06 -07002306 } else
2307 count_vm_event(THP_COLLAPSE_ALLOC);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002308 } while (unlikely(!hpage) &&
2309 likely(khugepaged_enabled()));
2310 return hpage;
2311}
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002312#endif
Andrea Arcangeliba761492011-01-13 15:46:58 -08002313
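/*
 * Main work loop of the khugepaged kernel thread: allocate a hugepage
 * (!NUMA) or recover from a failed allocation (NUMA), run one scan pass,
 * then sleep for khugepaged_scan_sleep_millisecs between passes (or until
 * woken) while staying freezable and stoppable.
 */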
2314static void khugepaged_loop(void)
2315{
2316 struct page *hpage;
2317
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002318#ifdef CONFIG_NUMA
2319 hpage = NULL;
2320#endif
Andrea Arcangeliba761492011-01-13 15:46:58 -08002321 while (likely(khugepaged_enabled())) {
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002322#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08002323 hpage = khugepaged_alloc_hugepage();
Andrea Arcangelif300ea42011-06-15 15:08:08 -07002324 if (unlikely(!hpage))
Andrea Arcangeliba761492011-01-13 15:46:58 -08002325 break;
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002326#else
2327 if (IS_ERR(hpage)) {
2328 khugepaged_alloc_sleep();
2329 hpage = NULL;
2330 }
2331#endif
Andrea Arcangeliba761492011-01-13 15:46:58 -08002332
2333 khugepaged_do_scan(&hpage);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002334#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08002335 if (hpage)
2336 put_page(hpage);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002337#endif
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002338 try_to_freeze();
2339 if (unlikely(kthread_should_stop()))
2340 break;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002341 if (khugepaged_has_work()) {
Andrea Arcangeliba761492011-01-13 15:46:58 -08002342 if (!khugepaged_scan_sleep_millisecs)
2343 continue;
Andrea Arcangeli1dfb0592011-12-08 14:33:57 -08002344 wait_event_freezable_timeout(khugepaged_wait, false,
2345 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
Andrea Arcangeliba761492011-01-13 15:46:58 -08002346 } else if (khugepaged_enabled())
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002347 wait_event_freezable(khugepaged_wait,
2348 khugepaged_wait_event());
Andrea Arcangeliba761492011-01-13 15:46:58 -08002349 }
2350}
2351
2352static int khugepaged(void *none)
2353{
2354 struct mm_slot *mm_slot;
2355
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002356 set_freezable();
Andrea Arcangeliba761492011-01-13 15:46:58 -08002357 set_user_nice(current, 19);
2358
2359 /* serialize with start_khugepaged() */
2360 mutex_lock(&khugepaged_mutex);
2361
2362 for (;;) {
2363 mutex_unlock(&khugepaged_mutex);
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002364 VM_BUG_ON(khugepaged_thread != current);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002365 khugepaged_loop();
Andrea Arcangelia7d6e4e2011-02-15 19:02:45 +01002366 VM_BUG_ON(khugepaged_thread != current);
Andrea Arcangeliba761492011-01-13 15:46:58 -08002367
2368 mutex_lock(&khugepaged_mutex);
2369 if (!khugepaged_enabled())
2370 break;
Andrea Arcangeli878aee72011-01-13 15:47:10 -08002371 if (unlikely(kthread_should_stop()))
2372 break;
Andrea Arcangeliba761492011-01-13 15:46:58 -08002373 }
2374
2375 spin_lock(&khugepaged_mm_lock);
2376 mm_slot = khugepaged_scan.mm_slot;
2377 khugepaged_scan.mm_slot = NULL;
2378 if (mm_slot)
2379 collect_mm_slot(mm_slot);
2380 spin_unlock(&khugepaged_mm_lock);
2381
2382 khugepaged_thread = NULL;
2383 mutex_unlock(&khugepaged_mutex);
2384
2385 return 0;
2386}
2387
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08002388void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2389{
2390 struct page *page;
2391
2392 spin_lock(&mm->page_table_lock);
2393 if (unlikely(!pmd_trans_huge(*pmd))) {
2394 spin_unlock(&mm->page_table_lock);
2395 return;
2396 }
2397 page = pmd_page(*pmd);
2398 VM_BUG_ON(!page_count(page));
2399 get_page(page);
2400 spin_unlock(&mm->page_table_lock);
2401
2402 split_huge_page(page);
2403
2404 put_page(page);
2405 BUG_ON(pmd_trans_huge(*pmd));
2406}
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002407
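/*
 * Split the huge pmd mapping @address, if one is present. Used by
 * __vma_adjust_trans_huge() below when a vma boundary stops being
 * HPAGE_PMD_SIZE aligned.
 */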
2408static void split_huge_page_address(struct mm_struct *mm,
2409 unsigned long address)
2410{
2411 pgd_t *pgd;
2412 pud_t *pud;
2413 pmd_t *pmd;
2414
2415 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2416
2417 pgd = pgd_offset(mm, address);
2418 if (!pgd_present(*pgd))
2419 return;
2420
2421 pud = pud_offset(pgd, address);
2422 if (!pud_present(*pud))
2423 return;
2424
2425 pmd = pmd_offset(pud, address);
2426 if (!pmd_present(*pmd))
2427 return;
2428 /*
2429 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2430 * materialize from under us.
2431 */
2432 split_huge_page_pmd(mm, pmd);
2433}
2434
2435void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2436 unsigned long start,
2437 unsigned long end,
2438 long adjust_next)
2439{
2440 /*
2441 * If the new start address isn't hpage aligned and it could
2442 * previously contain an hugepage: check if we need to split
2443 * an huge pmd.
2444 */
2445 if (start & ~HPAGE_PMD_MASK &&
2446 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2447 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2448 split_huge_page_address(vma->vm_mm, start);
2449
2450 /*
2451 * If the new end address isn't hpage aligned and it could
2452 * previously contain an hugepage: check if we need to split
2453 * an huge pmd.
2454 */
2455 if (end & ~HPAGE_PMD_MASK &&
2456 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2457 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2458 split_huge_page_address(vma->vm_mm, end);
2459
2460 /*
2461 * If we're also updating vma->vm_next->vm_start, and the new
2462 * vm_next->vm_start isn't hpage aligned and it could previously
2463 * contain an hugepage: check if we need to split an huge pmd.
2464 */
2465 if (adjust_next > 0) {
2466 struct vm_area_struct *next = vma->vm_next;
2467 unsigned long nstart = next->vm_start;
2468 nstart += adjust_next << PAGE_SHIFT;
2469 if (nstart & ~HPAGE_PMD_MASK &&
2470 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2471 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2472 split_huge_page_address(next->vm_mm, nstart);
2473 }
2474}