/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is invoked only by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions, to avoid the risk of slowing down short-lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte in the range is
 * mapped, just as would have happened had the vma been large enough
 * during the original page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
} khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

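/*
 * Raise min_free_kbytes so that enough pageblocks stay free (or almost
 * free) of mixed migratetypes: keeping the watermarks higher makes it
 * much more likely that compaction can later assemble the order-9
 * pages needed by khugepaged and by huge page faults.
 */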
static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) &&
	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags))
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow more than 5% of the lowmem to be reserved */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

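/*
 * Start the khugepaged kernel thread if hugepages are enabled, or wake
 * it up so it can notice it should exit if they are not. Called at init
 * time and whenever "enabled" is flipped through sysfs.
 */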
static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		int wakeup;
		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
			err = -ENOMEM;
			goto out;
		}
		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
		wakeup = !list_empty(&khugepaged_scan.mm_head);
		mutex_unlock(&khugepaged_mutex);
		if (wakeup)
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else
		/* wakeup to exit */
		wake_up_interruptible(&khugepaged_wait);
out:
	return err;
}

#ifdef CONFIG_SYSFS

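/*
 * The "enabled" and "defrag" sysfs files are each backed by a pair of
 * flag bits; double_flag_show/store translate between that bit pair
 * and the "always/madvise/never" triple exposed to userland.
 */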
static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_khugepaged();
		if (err)
			ret = err;
	}

	if (ret > 0 &&
	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) ||
	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags)))
		set_recommended_min_free_kbytes();

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	if (test_bit(flag, &transparent_hugepage_flags))
		return sprintf(buf, "[yes] no\n");
	else
		return sprintf(buf, "yes [no]\n");
}
static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	if (!memcmp("yes", buf,
		    min(sizeof("yes")-1, count))) {
		set_bit(flag, &transparent_hugepage_flags);
	} else if (!memcmp("no", buf,
			   min(sizeof("no")-1, count))) {
		clear_bit(flag, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

/*
 * Currently defrag only controls whether __GFP_WAIT is set in the
 * allocation mask, i.e. whether the allocation may block to reclaim
 * and compact memory. A blind __GFP_REPEAT would be too aggressive:
 * it's never worth swapping tons of memory just to allocate one more
 * hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged may collapse a range that
 * still contains unmapped (none) ptes, which potentially increases the
 * memory footprint of the vmas involved. When max_ptes_none is 0,
 * khugepaged will not reduce the amount of free memory in the system
 * as it runs; increasing max_ptes_none instead lets each collapse
 * consume additional free memory during the scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
#ifdef CONFIG_SYSFS
	static struct kobject *hugepage_kobj;

	err = -ENOMEM;
	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed kobject create\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto out;
	}

	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
		goto out;
	}
#endif

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	start_khugepaged();

	set_recommended_min_free_kbytes();

out:
	return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

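/*
 * Deposit a preallocated pagetable on the per-mm FIFO so it can be
 * consumed later by get_pmd_huge_pte(), e.g. when a huge pmd must be
 * split back into regular ptes.
 */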
static void prepare_pmd_huge_pte(pgtable_t pgtable,
				 struct mm_struct *mm)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}

static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

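/*
 * Map a freshly allocated, already memcg-charged hugepage at @haddr.
 * A regular pagetable is preallocated and deposited with
 * prepare_pmd_huge_pte() so that a later split of this huge pmd can
 * never fail for lack of memory.
 */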
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	int ret = 0;
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure the clear_huge_page writes
		 * become visible before the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		prepare_pmd_huge_pte(pgtable, mm);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		spin_unlock(&mm->page_table_lock);
	}

	return ret;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag)
{
	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
			       HPAGE_PMD_ORDER, vma, haddr);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag),
			   HPAGE_PMD_ORDER);
}
#endif

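/*
 * Anonymous hugepage fault entry point: when the faulting address sits
 * in a range that can hold a whole hugepage within the vma, try to map
 * one directly; otherwise (or if the hugepage allocation fails) fall
 * back to the regular pte fault path via handle_pte_fault().
 */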
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr);
		if (unlikely(!page))
			goto out;
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}

		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge
	 * pmd from under us anymore at this point because we hold the
	 * mmap_sem in read mode and khugepaged takes it in write mode.
	 * So now it's safe to run pte_offset_map().
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

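/*
 * fork() support: share the hugepage between parent and child by
 * write-protecting the pmd in both mms, taking an extra page reference
 * and duplicating the rmap. A later huge COW fault
 * (do_huge_pmd_wp_page) copies it on demand.
 */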
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	prepare_pmd_huge_pte(pgtable, dst_mm);

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}

/* no "address" argument, so this destroys page coloring on some archs */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}

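/*
 * COW fallback when a replacement hugepage can't be allocated: copy
 * the old hugepage into HPAGE_PMD_NR freshly allocated order-0 pages
 * and remap the range with regular ptes, effectively splitting the
 * mapping at fault time.
 */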
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
					  vma, address);
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE*i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = get_pmd_huge_pte(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	mm->nr_ptes++;
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}

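/*
 * Huge COW fault: reuse the hugepage in place when we are the only
 * mapper, otherwise copy into a freshly allocated hugepage, falling
 * back to do_huge_pmd_wp_page_fallback() when no hugepage can be
 * allocated.
 */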
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		put_page(page);
		goto out;
	}

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache(vma, address, entry);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}

struct page *follow_trans_huge_pmd(struct mm_struct *mm,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}

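/*
 * munmap()/exit() teardown of a huge pmd. Returns 1 if a huge pmd was
 * zapped, 0 if the pmd wasn't (or is no longer) huge so the caller has
 * to walk the ptes instead.
 */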
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd)
{
	int ret = 0;

	spin_lock(&tlb->mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&tlb->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma,
					     pmd);
		} else {
			struct page *page;
			pgtable_t pgtable;
			pgtable = get_pmd_huge_pte(tlb->mm);
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			page_remove_rmap(page);
			VM_BUG_ON(page_mapcount(page) < 0);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON(!PageHead(page));
			spin_unlock(&tlb->mm->page_table_lock);
			tlb_remove_page(tlb, page);
			pte_free(tlb->mm, pgtable);
			ret = 1;
		}
	} else
		spin_unlock(&tlb->mm->page_table_lock);

	return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		ret = !pmd_trans_splitting(*pmd);
		spin_unlock(&vma->vm_mm->page_table_lock);
		if (unlikely(!ret))
			wait_split_huge_page(vma->anon_vma, pmd);
		else {
			/*
			 * All logical pages in the range are present
			 * if backed by a huge page.
			 */
			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
			ret = 1;
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

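/*
 * Look up the pmd that maps @page at @address in @mm, or NULL if it
 * isn't mapped huge there. The flag argument lets callers assert the
 * expected splitting state of the pmd they find.
 */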
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
		  pmd_trans_splitting(*pmd));
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}

static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->lock to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush_notify(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

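/*
 * Second phase of the split: turn the compound page into HPAGE_PMD_NR
 * independent pages, distributing the head page's _count and _mapcount
 * to the tails while holding compound_lock and the zone lru_lock.
 */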
static void __split_huge_page_refcount(struct page *page)
{
	int i;
	unsigned long head_index = page->index;
	struct zone *zone = page_zone(page);

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	compound_lock(page);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;

		/* tail_page->_count cannot change */
		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
		BUG_ON(page_count(page) <= 0);
		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
		BUG_ON(atomic_read(&page_tail->_count) <= 0);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/*
		 * 1) clear PageTail before overwriting first_page
		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
		 */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * during the split. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved by setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		BUG_ON(page_mapcount(page_tail));
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = ++head_index;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(zone, page, page_tail);
	}

	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now become a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

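/*
 * Third phase of the split: rewrite the frozen huge pmd as a pagetable
 * full of regular ptes that preserve the write and young bits of the
 * original huge mapping.
 */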
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = get_pmd_huge_pte(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
		     i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		mm->nr_ptes++;
		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPU doesn't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLB is identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->lock held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

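/*
 * Split a huge page into regular pages. Returns 0 on success (or when
 * someone else already split it from under us), non-zero when the
 * page's anon_vma couldn't be locked.
 */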
int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}

int hugepage_madvise(unsigned long *vm_flags)
{
	/*
	 * Be somewhat over-protective like KSM for now!
	 */
	if (*vm_flags & (VM_HUGEPAGE | VM_SHARED | VM_MAYSHARE |
			 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
			 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
			 VM_MIXEDMAP | VM_SAO))
		return -EINVAL;

	*vm_flags |= VM_HUGEPAGE;

	return 0;
}

static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

#if 0
static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
#endif

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

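/*
 * Register an mm with khugepaged: allocate an mm_slot, hash it, queue
 * it behind the scan cursor and pin the mm with an mm_count reference
 * until __khugepaged_exit() unregisters it.
 */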
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_file || vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}

	if (free) {
		spin_unlock(&khugepaged_mm_lock);
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		spin_unlock(&khugepaged_mm_lock);
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	} else
		spin_unlock(&khugepaged_mm_lock);
}

static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

static void release_all_pte_pages(pte_t *pte)
{
	release_pte_pages(pte, pte + HPAGE_PMD_NR);
}

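/*
 * Lock and isolate from the LRU every page mapped by the ptes we are
 * about to collapse, rolling back on swapped-out, shared, gup-pinned
 * or otherwise unsuitable pages. Returns 1 only when all pages are
 * isolated and at least one pte was referenced.
 */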
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, isolated = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else {
				release_pte_pages(pte, _pte);
				goto out;
			}
		}
		if (!pte_present(pteval) || !pte_write(pteval)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			release_pte_pages(pte, _pte);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If no mapped pte is young, don't collapse the page */
		if (pte_young(pteval))
			referenced = 1;
	}
	if (unlikely(!referenced))
		release_all_pte_pages(pte);
	else
		isolated = 1;
out:
	return isolated;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			VM_BUG_ON(page_count(src_page) != 2);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

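/*
 * Collapse an isolated pte range into one hugepage: freeze the pmd,
 * copy the old pages into the new hugepage, then map it with a single
 * huge pmd. Called with mmap_sem held in read mode, which is dropped
 * and re-taken in write mode here.
 */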
1670static void collapse_huge_page(struct mm_struct *mm,
1671 unsigned long address,
Andrea Arcangelice83d212011-01-13 15:47:06 -08001672 struct page **hpage,
1673 struct vm_area_struct *vma)
Andrea Arcangeliba761492011-01-13 15:46:58 -08001674{
Andrea Arcangeliba761492011-01-13 15:46:58 -08001675 pgd_t *pgd;
1676 pud_t *pud;
1677 pmd_t *pmd, _pmd;
1678 pte_t *pte;
1679 pgtable_t pgtable;
1680 struct page *new_page;
1681 spinlock_t *ptl;
1682 int isolated;
1683 unsigned long hstart, hend;
1684
1685 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001686#ifndef CONFIG_NUMA
Andrea Arcangeliba761492011-01-13 15:46:58 -08001687 VM_BUG_ON(!*hpage);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001688 new_page = *hpage;
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001689#else
1690 VM_BUG_ON(*hpage);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001691 /*
1692 * Allocate the page while the vma is still valid and under
1693 * the mmap_sem read mode so there is no memory allocation
1694 * later when we take the mmap_sem in write mode. This is more
1695 * friendly behavior (OTOH it may actually hide bugs) to
1696 * filesystems in userland with daemons allocating memory in
1697 * the userland I/O paths. Allocating memory with the
1698 * mmap_sem in read mode is good idea also to allow greater
1699 * scalability.
1700 */
1701 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
1702 if (unlikely(!new_page)) {
1703 up_read(&mm->mmap_sem);
1704 *hpage = ERR_PTR(-ENOMEM);
1705 return;
1706 }
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001707#endif
Andrea Arcangelice83d212011-01-13 15:47:06 -08001708 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1709 up_read(&mm->mmap_sem);
1710 put_page(new_page);
1711 return;
1712 }
1713
1714 /* after allocating the hugepage upgrade to mmap_sem write mode */
1715 up_read(&mm->mmap_sem);
Andrea Arcangeliba761492011-01-13 15:46:58 -08001716
1717 /*
1718 * Prevent all access to pagetables with the exception of
1719 * gup_fast later hanlded by the ptep_clear_flush and the VM
1720 * handled by the anon_vma lock + PG_lock.
1721 */
1722 down_write(&mm->mmap_sem);
1723 if (unlikely(khugepaged_test_exit(mm)))
1724 goto out;
1725
1726 vma = find_vma(mm, address);
1727 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1728 hend = vma->vm_end & HPAGE_PMD_MASK;
1729 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1730 goto out;
1731
1732 if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
1733 goto out;
1734
1735 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1736 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1737 goto out;
1738 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1739
1740 pgd = pgd_offset(mm, address);
1741 if (!pgd_present(*pgd))
1742 goto out;
1743
1744 pud = pud_offset(pgd, address);
1745 if (!pud_present(*pud))
1746 goto out;
1747
1748 pmd = pmd_offset(pud, address);
1749 /* pmd can't go away or become huge under us */
1750 if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1751 goto out;
1752
Andrea Arcangeliba761492011-01-13 15:46:58 -08001753 anon_vma_lock(vma->anon_vma);
1754
1755 pte = pte_offset_map(pmd, address);
1756 ptl = pte_lockptr(mm, pmd);
1757
1758 spin_lock(&mm->page_table_lock); /* probably unnecessary */
1759 /*
1760 * After this gup_fast can't run anymore. This also removes
1761 * any huge TLB entry from the CPU so we won't allow
1762 * huge and small TLB entries for the same virtual address
1763 * to avoid the risk of CPU bugs in that area.
1764 */
1765 _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1766 spin_unlock(&mm->page_table_lock);
1767
1768 spin_lock(ptl);
1769 isolated = __collapse_huge_page_isolate(vma, address, pte);
1770 spin_unlock(ptl);
1771 pte_unmap(pte);
1772
1773 if (unlikely(!isolated)) {
1774 spin_lock(&mm->page_table_lock);
1775 BUG_ON(!pmd_none(*pmd));
1776 set_pmd_at(mm, address, pmd, _pmd);
1777 spin_unlock(&mm->page_table_lock);
1778 anon_vma_unlock(vma->anon_vma);
1779 mem_cgroup_uncharge_page(new_page);
Andrea Arcangelice83d212011-01-13 15:47:06 -08001780 goto out;
Andrea Arcangeliba761492011-01-13 15:46:58 -08001781 }

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);
	VM_BUG_ON(page_count(pgtable) != 1);
	VM_BUG_ON(page_mapcount(pgtable) != 0);

	_pmd = mk_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
	_pmd = pmd_mkhuge(_pmd);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache(vma, address, _pmd);
	prepare_pmd_huge_pte(pgtable, mm);
	mm->nr_ptes--;
	spin_unlock(&mm->page_table_lock);

#ifndef CONFIG_NUMA
	*hpage = NULL;
#endif
	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
#ifdef CONFIG_NUMA
	put_page(new_page);
#endif
	goto out_up_write;
}

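/*
 * Scan one pmd-sized, aligned range and decide whether it is a good
 * collapse candidate.  Returns 1 if collapse_huge_page() was invoked
 * (in which case the mmap_sem has been released), 0 otherwise.
 */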
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma);
out:
	return ret;
}

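/*
 * Unhook an mm from the khugepaged scan list once it has exited.
 * Must be called with khugepaged_mm_lock held.
 */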
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

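/*
 * Scan up to @pages pte entries starting at the khugepaged_scan
 * cursor.  Called with khugepaged_mm_lock held and returns with it
 * held, but the lock is dropped around the actual mm scan.  The
 * return value is the amount of progress made towards @pages.
 */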
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}

		if (!(vma->vm_flags & VM_HUGEPAGE) &&
		    !khugepaged_always()) {
			progress++;
			continue;
		}

		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
		if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
			khugepaged_scan.address = vma->vm_end;
			progress++;
			continue;
		}
		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend) {
			progress++;
			continue;
		}
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		if (khugepaged_scan.address > hend) {
			khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
			progress++;
			continue;
		}
		BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

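/*
 * Note the asymmetry between the two helpers below: the wait
 * condition must also fire when khugepaged gets disabled, so the
 * main loop can wake up and exit.
 */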
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		!khugepaged_enabled();
}

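/*
 * Do one batch of scanning: at most khugepaged_pages_to_scan pte
 * entries, spread across as many mm_slots as needed.  On !NUMA the
 * preallocated hugepage is replenished on each iteration.
 */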
static void khugepaged_do_scan(struct page **hpage)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;

	barrier(); /* snapshot khugepaged_pages_to_scan onto the local stack */

	while (progress < pages) {
		cond_resched();

#ifndef CONFIG_NUMA
		if (!*hpage) {
			*hpage = alloc_hugepage(khugepaged_defrag());
			if (unlikely(!*hpage))
				break;
		}
#else
		if (IS_ERR(*hpage))
			break;
#endif

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}
}

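/*
 * Wait for khugepaged_alloc_sleep_millisecs after a hugepage
 * allocation failure before trying the allocator again.
 */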
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);
	add_wait_queue(&khugepaged_wait, &wait);
	schedule_timeout_interruptible(
		msecs_to_jiffies(
			khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage)
			khugepaged_alloc_sleep();
	} while (unlikely(!hpage) &&
		 likely(khugepaged_enabled()));
	return hpage;
}
#endif

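/*
 * Main loop of the khugepaged kernel thread: keep one preallocated
 * hugepage at hand (!NUMA only) and alternate between scanning and
 * sleeping until khugepaged is disabled.
 */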
static void khugepaged_loop(void)
{
	struct page *hpage;

#ifdef CONFIG_NUMA
	hpage = NULL;
#endif
	while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
		hpage = khugepaged_alloc_hugepage();
		if (unlikely(!hpage))
			break;
#else
		if (IS_ERR(hpage)) {
			khugepaged_alloc_sleep();
			hpage = NULL;
		}
#endif

		khugepaged_do_scan(&hpage);
#ifndef CONFIG_NUMA
		if (hpage)
			put_page(hpage);
#endif
		if (khugepaged_has_work()) {
			DEFINE_WAIT(wait);
			if (!khugepaged_scan_sleep_millisecs)
				continue;
			add_wait_queue(&khugepaged_wait, &wait);
			schedule_timeout_interruptible(
				msecs_to_jiffies(
					khugepaged_scan_sleep_millisecs));
			remove_wait_queue(&khugepaged_wait, &wait);
		} else if (khugepaged_enabled())
			wait_event_interruptible(khugepaged_wait,
						 khugepaged_wait_event());
	}
}

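/*
 * Kernel thread entry point: run khugepaged_loop() until khugepaged
 * is disabled, then drop the scan cursor and exit.
 */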
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_user_nice(current, 19);

	/* serialize with start_khugepaged() */
	mutex_lock(&khugepaged_mutex);

	for (;;) {
		mutex_unlock(&khugepaged_mutex);
		BUG_ON(khugepaged_thread != current);
		khugepaged_loop();
		BUG_ON(khugepaged_thread != current);

		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_enabled())
			break;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);

	khugepaged_thread = NULL;
	mutex_unlock(&khugepaged_mutex);

	return 0;
}

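/*
 * Split the transparent hugepage mapped at *pmd back to regular
 * ptes.  A reference on the head page is taken under
 * page_table_lock so split_huge_page() can run safely after the
 * lock is dropped.
 */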
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}