/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions, to avoid the risk of slowing down short-lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
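/*
 * Worked example of the defaults (assuming x86-64 with 4KB base pages,
 * where HPAGE_PMD_NR is 512): khugepaged wakes up every 10 seconds and
 * examines up to 8*512 = 4096 ptes, i.e. at most 8 pmd-sized ranges per
 * wakeup, before sleeping again.
 */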
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default collapse a hugepage if at least one pte is mapped,
 * as would have happened had the vma been large enough at
 * page fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!khugepaged_enabled())
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);
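
/*
 * Worked example (the zone count is an illustrative assumption, not
 * taken from the code above): on x86-64 with 4KB pages and 2MB
 * pageblocks, pageblock_nr_pages is 512. With MIGRATE_PCPTYPES == 3
 * and, say, 4 populated zones:
 *
 *   recommended_min = 512 * 4 * 2     =  4096 pages
 *                   + 512 * 4 * 3 * 3 = 18432 pages
 *                                     = 22528 pages
 *
 * The final "<<= (PAGE_SHIFT-10)" converts pages to kilobytes, giving
 * 22528 * 4 = 90112 KB (~88MB), subject to the 5%-of-lowmem clamp.
 */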

static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
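
/*
 * Note on the parsing above: only the keyword prefix is compared
 * (min(sizeof("always")-1, count)), so a write of "always\n" from a
 * plain "echo" matches too; the trailing newline is ignored.
 */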

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only controls whether __GFP_WAIT is set for the
 * allocation (see alloc_hugepage_gfpmask() below). A blind __GFP_REPEAT
 * would be too aggressive: it's never worth swapping tons of memory
 * just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
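
/*
 * These attributes surface under sysfs (created by hugepage_init_sysfs()
 * below), typically mounted at /sys. Example from userspace:
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo madvise > /sys/kernel/mm/transparent_hugepage/defrag
 */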

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over ranges that still contain unmapped ptes, which potentially
 * increases the memory footprint of the vmas. With max_ptes_none set
 * to 0, khugepaged never reduces the amount of free memory in the
 * system while it runs. Larger values let khugepaged instantiate
 * memory for ptes that were never faulted in, so they can reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
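
/*
 * Example values: the default, HPAGE_PMD_NR-1 (511 with 2MB hugepages),
 * lets khugepaged collapse a range even if only a single pte in it is
 * mapped; writing 0 restricts collapses to ranges whose 512 ptes are
 * all already mapped.
 */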

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
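
/*
 * Because of the .name above, these files land in a subdirectory,
 * e.g. /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan.
 */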

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed kobject create\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
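
/*
 * Boot-time usage, matching the three keywords parsed above, e.g. on
 * the kernel command line:
 *
 *   transparent_hugepage=madvise
 */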

static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable))
		return VM_FAULT_OOM;

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible before the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		pgtable_trans_huge_deposit(mm, pgtable);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm->nr_ptes++;
		spin_unlock(&mm->page_table_lock);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
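
/*
 * When defrag is disabled the mask above drops __GFP_WAIT, so the
 * hugepage allocation cannot enter direct reclaim/compaction and the
 * fault path simply falls back to small pages if no hugepage is
 * readily free.
 */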

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
#endif

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr, numa_node_id(), 0);
		if (unlikely(!page)) {
			count_vm_event(THP_FAULT_FALLBACK);
			goto out;
		}
		count_vm_event(THP_FAULT_ALLOC);
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}
		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
							  page))) {
			mem_cgroup_uncharge_page(page);
			put_page(page);
			goto out;
		}

		return 0;
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	pgtable_trans_huge_deposit(dst_mm, pgtable);
	dst_mm->nr_ptes++;

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache_pmd(vma, address, pmd);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr, numa_node_id(), 0);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		if (ret & VM_FAULT_OOM)
			split_huge_page(page);
		put_page(page);
		goto out;
	}
	count_vm_event(THP_FAULT_ALLOC);

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		split_huge_page(page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, address, pmd);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(&mm->page_table_lock);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit ever becomes meaningful and we
		 * only set it with FOLL_WRITE, an atomic set_bit will
		 * be required on the pmd to set the young bit, instead
		 * of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
		struct page *page;
		pgtable_t pgtable;
		pmd_t orig_pmd;
		pgtable = pgtable_trans_huge_withdraw(tlb->mm);
		orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
		page = pmd_page(orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		page_remove_rmap(page);
		VM_BUG_ON(page_mapcount(page) < 0);
		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		VM_BUG_ON(!PageHead(page));
		tlb->mm->nr_ptes--;
		spin_unlock(&tlb->mm->page_table_lock);
		tlb_remove_page(tlb, page);
		pte_free(tlb->mm, pgtable);
		ret = 1;
	}
	return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
		/*
		 * All logical pages in the range are present
		 * if backed by a huge page.
		 */
		spin_unlock(&vma->vm_mm->page_table_lock);
		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		ret = 1;
	}

	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	ret = __pmd_trans_huge_lock(old_pmd, vma);
	if (ret == 1) {
		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
		VM_BUG_ON(!pmd_none(*new_pmd));
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		spin_unlock(&mm->page_table_lock);
	}
out:
	return ret;
}

int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
		pmd_t entry;
		entry = pmdp_get_and_clear(mm, addr, pmd);
		entry = pmd_modify(entry, newprot);
		set_pmd_at(mm, addr, pmd, entry);
		spin_unlock(&vma->vm_mm->page_table_lock);
		ret = 1;
	}

	return ret;
}

/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking the
 * page table lock. So callers must unlock it.
 */
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		} else {
			/* Thp mapped by 'pmd' is stable, so we can
			 * handle it as it is. */
			return 1;
		}
	}
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}
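
/*
 * Typical caller pattern, as used by zap_huge_pmd() and
 * mincore_huge_pmd() earlier in this file (a sketch, not additional
 * API):
 *
 *	if (__pmd_trans_huge_lock(pmd, vma) == 1) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(&vma->vm_mm->page_table_lock);
 *	}
 */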

pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto out;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}

static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;
	/* For mmu_notifiers */
	const unsigned long mmun_start = address;
	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->mutex to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	return ret;
}

static void __split_huge_page_refcount(struct page *page)
{
	int i;
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);

	compound_lock(page);
	/* complete memcg works before adding pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain hwpoison flag of the poisoned tail page:
		 * fix for the unsuitable process being killed on a
		 * guest machine (KVM) by memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, during the split the
		 * mapcount can't change. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(page, page_tail, lruvec);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = pgtable_trans_huge_withdraw(mm);
		pmd_populate(mm, &_pmd, pgtable);

		haddr = address;
		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the
		 * case here). But it is generally safer to never
		 * allow small and huge TLB entries for the same
		 * virtual address to be loaded simultaneously. So
		 * instead of doing "pmd_populate(); flush_tlb_range();"
		 * we first mark the current pmd notpresent (atomically
		 * because here the pmd_trans_huge and
		 * pmd_trans_splitting must remain set at all times on
		 * the pmd until the split is complete for this pmd),
		 * then we flush the SMP TLB and finally we write the
		 * non-huge version of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->mutex held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}

#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

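/*
 * Illustrative userspace counterpart (an assumption about typical use,
 * not part of this file): an application advises an aligned region so
 * faults there can be backed by hugepages right away:
 *
 *	posix_memalign(&buf, 2UL << 20, len);
 *	madvise(buf, len, MADV_HUGEPAGE);
 */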
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	struct mm_struct *mm = vma->vm_mm;

	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		if (mm->def_flags & VM_NOHUGEPAGE)
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes a good fit for khugepaged to
		 * scan, register it here without waiting for a page
		 * fault that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from
		 * scanning this vma even if the mm stays registered in
		 * khugepaged (it may have been registered before
		 * VM_NOHUGEPAGE was set).
		 */
		break;
	}

	return 0;
}

static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

#if 0
static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
#endif

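/*
 * mm_structs come out of the slab allocator, so dividing the pointer
 * by sizeof(struct mm_struct) before the modulo should spread the
 * buckets more evenly than hashing the raw address.
 */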
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in, so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (all pagetables
		 * will be destroyed after we return) until khugepaged
		 * has finished working on them under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

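/*
 * Check and isolate the HPAGE_PMD_NR pages that would be collapsed:
 * returns 1 with every page locked and off the LRU, or 0 on failure
 * after putting back whatever was isolated so far.
 */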
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out;
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page))
			goto out;

		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out;
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page))
			goto out;
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* if no mapped pte is young, don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (likely(referenced))
		return 1;
out:
	release_pte_pages(pte, _pte);
	return 0;
}

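/*
 * Copy the isolated pages into the new hugepage: pte_none slots are
 * backed by a cleared subpage, everything else is copied and the old
 * page's rmap and swap cache state torn down.
 */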
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifdef CONFIG_NUMA
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	VM_BUG_ON(*hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is a good idea also to allow greater
	 * scalability.
	 */
	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				    node, __GFP_OTHER_NODE);

	/*
	 * After allocating the hugepage, release the mmap_sem read lock in
	 * preparation for taking it in write mode.
	 */
	up_read(&mm->mmap_sem);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
	return *hpage;
}
#endif

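/*
 * Collapse a properly aligned pmd range into one hugepage: allocate
 * the page, revalidate the vma under mmap_sem write mode, clear and
 * flush the old pmd, isolate and copy the small pages, then install
 * the huge pmd. Entered with mmap_sem held for read; returns with
 * mmap_sem released.
 */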
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
	if (!new_page)
		return;

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
		return;

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;

	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	if (!vma->anon_vma || vma->vm_ops)
		goto out;
	if (is_vma_temporary_stack(vma))
		goto out;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/* pmd can't go away or become huge under us */
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
	_pmd = pmd_mkhuge(_pmd);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	pgtable_trans_huge_deposit(mm, pgtable);
	spin_unlock(&mm->page_table_lock);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_uncharge_page(new_page);
	goto out_up_write;
}

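/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * not too many pte_none entries, all pages anonymous and not pinned by
 * gup, and at least one pte or page referenced. Returns 1 if
 * collapse_huge_page() was invoked (and mmap_sem released), 0
 * otherwise.
 */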
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = -1;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Choose the node of the first page. This could
		 * be more sophisticated and look at more pages,
		 * but isn't for now.
		 */
		if (node == -1)
			node = page_to_nid(page);
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
out:
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

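/*
 * Walk the registered mms round-robin, scanning up to "pages" ptes per
 * call; the cursor is kept in khugepaged_scan so the next call resumes
 * where this one left off.
 */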
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}

		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
		     !khugepaged_always()) ||
		    (vma->vm_flags & VM_NOHUGEPAGE)) {
		skip:
			progress++;
			continue;
		}
		if (!vma->anon_vma || vma->vm_ops)
			goto skip;
		if (is_vma_temporary_stack(vma))
			goto skip;
		VM_BUG_ON(vma->vm_flags & VM_NO_THP);

		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

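/*
 * One scan pass: keep a preallocated hugepage at hand (with
 * CONFIG_NUMA the allocation is instead done at collapse time, on the
 * right node) and scan mm slots until "pages" ptes were examined or
 * the thread is asked to stop or freeze.
 */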
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static void khugepaged_wait_work(void)
{
	try_to_freeze();

	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, 19);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

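/*
 * Split the hugepage mapped by *pmd; the extra get_page() keeps the
 * page alive across the split once page_table_lock is dropped.
 */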
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}

static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd(mm, pmd);
}

void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}