blob: 78427af91de96980a8450ef2c8dbeccbb59078d0 [file] [log] [blame]
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001/*
2 * Copyright (C) 2009 Red Hat, Inc.
3 *
4 * This work is licensed under the terms of the GNU GPL, version 2. See
5 * the COPYING file in the top-level directory.
6 */
7
Andrew Mortonae3a8c12014-06-04 16:06:58 -07008#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -080010#include <linux/mm.h>
11#include <linux/sched.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +010012#include <linux/sched/coredump.h>
Ingo Molnar6a3827d2017-02-08 18:51:31 +010013#include <linux/sched/numa_balancing.h>
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -080014#include <linux/highmem.h>
15#include <linux/hugetlb.h>
16#include <linux/mmu_notifier.h>
17#include <linux/rmap.h>
18#include <linux/swap.h>
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080019#include <linux/shrinker.h>
Andrea Arcangeliba761492011-01-13 15:46:58 -080020#include <linux/mm_inline.h>
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -080021#include <linux/swapops.h>
Matthew Wilcox4897c762015-09-08 14:58:45 -070022#include <linux/dax.h>
Andrea Arcangeliba761492011-01-13 15:46:58 -080023#include <linux/khugepaged.h>
Andrea Arcangeli878aee72011-01-13 15:47:10 -080024#include <linux/freezer.h>
Dan Williamsf25748e32016-01-15 16:56:43 -080025#include <linux/pfn_t.h>
Andrea Arcangelia664b2d2011-01-13 15:47:17 -080026#include <linux/mman.h>
Dan Williams3565fce2016-01-15 16:56:55 -080027#include <linux/memremap.h>
Ralf Baechle325adeb2012-10-15 13:44:56 +020028#include <linux/pagemap.h>
Kirill A. Shutemov49071d42016-01-15 16:54:40 -080029#include <linux/debugfs.h>
Mel Gorman4daae3b2012-11-02 11:33:45 +000030#include <linux/migrate.h>
Sasha Levin43b5fbb2013-02-22 16:32:27 -080031#include <linux/hashtable.h>
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -070032#include <linux/userfaultfd_k.h>
Vladimir Davydov33c3fc72015-09-09 15:35:45 -070033#include <linux/page_idle.h>
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -070034#include <linux/shmem_fs.h>
Michal Hocko6b31d592017-08-18 15:16:15 -070035#include <linux/oom.h>
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080036
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -080037#include <asm/tlb.h>
38#include <asm/pgalloc.h>
39#include "internal.h"
40
Andrea Arcangeliba761492011-01-13 15:46:58 -080041/*
Michael DeGuzisb14d5952017-05-17 15:19:21 -040042 * By default, transparent hugepage support is disabled in order to avoid
43 * risking an increased memory footprint for applications that are not
44 * guaranteed to benefit from it. When transparent hugepage support is
45 * enabled, it is for all mappings, and khugepaged scans all mappings.
Jianguo Wu8bfa3f92013-11-12 15:07:16 -080046 * Defrag is invoked by khugepaged hugepage allocations and by page faults
47 * for all hugepage allocations.
Andrea Arcangeliba761492011-01-13 15:46:58 -080048 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -080049unsigned long transparent_hugepage_flags __read_mostly =
Andrea Arcangeli13ece882011-01-13 15:47:07 -080050#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
Andrea Arcangeliba761492011-01-13 15:46:58 -080051 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
Andrea Arcangeli13ece882011-01-13 15:47:07 -080052#endif
53#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
54 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
55#endif
Mel Gorman444eb2a42016-03-17 14:19:23 -070056 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
Kirill A. Shutemov79da5402012-12-12 13:51:12 -080057 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
58 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
Andrea Arcangeliba761492011-01-13 15:46:58 -080059
Kirill A. Shutemov9a982252016-01-15 16:54:17 -080060static struct shrinker deferred_split_shrinker;
Andrea Arcangelif0005652011-01-13 15:47:04 -080061
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080062static atomic_t huge_zero_refcount;
Wang, Yalin56873f42015-02-11 15:24:51 -080063struct page *huge_zero_page __read_mostly;
Kirill A. Shutemov4a6c1292012-12-12 13:50:47 -080064
Aaron Lu6fcb52a2016-10-07 17:00:08 -070065static struct page *get_huge_zero_page(void)
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080066{
67 struct page *zero_page;
68retry:
69 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
Jason Low4db0c3c2015-04-15 16:14:08 -070070 return READ_ONCE(huge_zero_page);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080071
72 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
73 HPAGE_PMD_ORDER);
Kirill A. Shutemovd8a8e1f2012-12-12 13:51:09 -080074 if (!zero_page) {
75 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
Kirill A. Shutemov5918d102013-04-29 15:08:44 -070076 return NULL;
Kirill A. Shutemovd8a8e1f2012-12-12 13:51:09 -080077 }
78 count_vm_event(THP_ZERO_PAGE_ALLOC);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080079 preempt_disable();
Kirill A. Shutemov5918d102013-04-29 15:08:44 -070080 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080081 preempt_enable();
Yu Zhao5ddacbe2014-10-29 14:50:26 -070082 __free_pages(zero_page, compound_order(zero_page));
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080083 goto retry;
84 }
85
86 /* We take additional reference here. It will be put back by shrinker */
87 atomic_set(&huge_zero_refcount, 2);
88 preempt_enable();
Jason Low4db0c3c2015-04-15 16:14:08 -070089 return READ_ONCE(huge_zero_page);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080090}
91
Aaron Lu6fcb52a2016-10-07 17:00:08 -070092static void put_huge_zero_page(void)
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -080093{
94 /*
95 * Counter should never go to zero here. Only shrinker can put
96 * last reference.
97 */
98 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
99}
100
Aaron Lu6fcb52a2016-10-07 17:00:08 -0700101struct page *mm_get_huge_zero_page(struct mm_struct *mm)
102{
103 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
104 return READ_ONCE(huge_zero_page);
105
106 if (!get_huge_zero_page())
107 return NULL;
108
109 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
110 put_huge_zero_page();
111
112 return READ_ONCE(huge_zero_page);
113}
114
115void mm_put_huge_zero_page(struct mm_struct *mm)
116{
117 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
118 put_huge_zero_page();
119}
120
Glauber Costa48896462013-08-28 10:18:15 +1000121static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
122 struct shrink_control *sc)
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800123{
Glauber Costa48896462013-08-28 10:18:15 +1000124 /* we can free zero page only if last reference remains */
125 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
126}
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800127
Glauber Costa48896462013-08-28 10:18:15 +1000128static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
129 struct shrink_control *sc)
130{
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800131 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
Kirill A. Shutemov5918d102013-04-29 15:08:44 -0700132 struct page *zero_page = xchg(&huge_zero_page, NULL);
133 BUG_ON(zero_page == NULL);
Yu Zhao5ddacbe2014-10-29 14:50:26 -0700134 __free_pages(zero_page, compound_order(zero_page));
Glauber Costa48896462013-08-28 10:18:15 +1000135 return HPAGE_PMD_NR;
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800136 }
137
138 return 0;
139}
140
141static struct shrinker huge_zero_page_shrinker = {
Glauber Costa48896462013-08-28 10:18:15 +1000142 .count_objects = shrink_huge_zero_page_count,
143 .scan_objects = shrink_huge_zero_page_scan,
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800144 .seeks = DEFAULT_SEEKS,
145};
146
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800147#ifdef CONFIG_SYSFS
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800148static ssize_t enabled_show(struct kobject *kobj,
149 struct kobj_attribute *attr, char *buf)
150{
Mel Gorman444eb2a42016-03-17 14:19:23 -0700151 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
152 return sprintf(buf, "[always] madvise never\n");
153 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
154 return sprintf(buf, "always [madvise] never\n");
155 else
156 return sprintf(buf, "always madvise [never]\n");
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800157}
Mel Gorman444eb2a42016-03-17 14:19:23 -0700158
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800159static ssize_t enabled_store(struct kobject *kobj,
160 struct kobj_attribute *attr,
161 const char *buf, size_t count)
162{
David Rientjes21440d72017-02-22 15:45:49 -0800163 ssize_t ret = count;
Andrea Arcangeliba761492011-01-13 15:46:58 -0800164
David Rientjes21440d72017-02-22 15:45:49 -0800165 if (!memcmp("always", buf,
166 min(sizeof("always")-1, count))) {
167 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
168 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
169 } else if (!memcmp("madvise", buf,
170 min(sizeof("madvise")-1, count))) {
171 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
172 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
173 } else if (!memcmp("never", buf,
174 min(sizeof("never")-1, count))) {
175 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
176 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
177 } else
178 ret = -EINVAL;
Andrea Arcangeliba761492011-01-13 15:46:58 -0800179
180 if (ret > 0) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700181 int err = start_stop_khugepaged();
Andrea Arcangeliba761492011-01-13 15:46:58 -0800182 if (err)
183 ret = err;
184 }
Andrea Arcangeliba761492011-01-13 15:46:58 -0800185 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800186}
187static struct kobj_attribute enabled_attr =
188 __ATTR(enabled, 0644, enabled_show, enabled_store);
189
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700190ssize_t single_hugepage_flag_show(struct kobject *kobj,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800191 struct kobj_attribute *attr, char *buf,
192 enum transparent_hugepage_flag flag)
193{
Ben Hutchingse27e6152011-04-14 15:22:21 -0700194 return sprintf(buf, "%d\n",
195 !!test_bit(flag, &transparent_hugepage_flags));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800196}
Ben Hutchingse27e6152011-04-14 15:22:21 -0700197
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700198ssize_t single_hugepage_flag_store(struct kobject *kobj,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800199 struct kobj_attribute *attr,
200 const char *buf, size_t count,
201 enum transparent_hugepage_flag flag)
202{
Ben Hutchingse27e6152011-04-14 15:22:21 -0700203 unsigned long value;
204 int ret;
205
206 ret = kstrtoul(buf, 10, &value);
207 if (ret < 0)
208 return ret;
209 if (value > 1)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800210 return -EINVAL;
211
Ben Hutchingse27e6152011-04-14 15:22:21 -0700212 if (value)
213 set_bit(flag, &transparent_hugepage_flags);
214 else
215 clear_bit(flag, &transparent_hugepage_flags);
216
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800217 return count;
218}
219
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800220static ssize_t defrag_show(struct kobject *kobj,
221 struct kobj_attribute *attr, char *buf)
222{
Mel Gorman444eb2a42016-03-17 14:19:23 -0700223 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
David Rientjes21440d72017-02-22 15:45:49 -0800224 return sprintf(buf, "[always] defer defer+madvise madvise never\n");
Mel Gorman444eb2a42016-03-17 14:19:23 -0700225 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
David Rientjes21440d72017-02-22 15:45:49 -0800226 return sprintf(buf, "always [defer] defer+madvise madvise never\n");
227 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
228 return sprintf(buf, "always defer [defer+madvise] madvise never\n");
229 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
230 return sprintf(buf, "always defer defer+madvise [madvise] never\n");
231 return sprintf(buf, "always defer defer+madvise madvise [never]\n");
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800232}
David Rientjes21440d72017-02-22 15:45:49 -0800233
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800234static ssize_t defrag_store(struct kobject *kobj,
235 struct kobj_attribute *attr,
236 const char *buf, size_t count)
237{
David Rientjes21440d72017-02-22 15:45:49 -0800238 if (!memcmp("always", buf,
239 min(sizeof("always")-1, count))) {
240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
242 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
243 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
David Rientjes21440d72017-02-22 15:45:49 -0800244 } else if (!memcmp("defer+madvise", buf,
245 min(sizeof("defer+madvise")-1, count))) {
246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
248 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
249 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
David Rientjes4fad7fb2017-04-07 16:04:54 -0700250 } else if (!memcmp("defer", buf,
251 min(sizeof("defer")-1, count))) {
252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
254 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
255 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
David Rientjes21440d72017-02-22 15:45:49 -0800256 } else if (!memcmp("madvise", buf,
257 min(sizeof("madvise")-1, count))) {
258 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
259 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
260 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
261 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
262 } else if (!memcmp("never", buf,
263 min(sizeof("never")-1, count))) {
264 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
265 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
266 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
267 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
268 } else
269 return -EINVAL;
270
271 return count;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800272}
273static struct kobj_attribute defrag_attr =
274 __ATTR(defrag, 0644, defrag_show, defrag_store);
275
Kirill A. Shutemov79da5402012-12-12 13:51:12 -0800276static ssize_t use_zero_page_show(struct kobject *kobj,
277 struct kobj_attribute *attr, char *buf)
278{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700279 return single_hugepage_flag_show(kobj, attr, buf,
Kirill A. Shutemov79da5402012-12-12 13:51:12 -0800280 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
281}
282static ssize_t use_zero_page_store(struct kobject *kobj,
283 struct kobj_attribute *attr, const char *buf, size_t count)
284{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700285 return single_hugepage_flag_store(kobj, attr, buf, count,
Kirill A. Shutemov79da5402012-12-12 13:51:12 -0800286 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
287}
288static struct kobj_attribute use_zero_page_attr =
289 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
Hugh Dickins49920d22016-12-12 16:44:50 -0800290
291static ssize_t hpage_pmd_size_show(struct kobject *kobj,
292 struct kobj_attribute *attr, char *buf)
293{
294 return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
295}
296static struct kobj_attribute hpage_pmd_size_attr =
297 __ATTR_RO(hpage_pmd_size);
298
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800299#ifdef CONFIG_DEBUG_VM
300static ssize_t debug_cow_show(struct kobject *kobj,
301 struct kobj_attribute *attr, char *buf)
302{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700303 return single_hugepage_flag_show(kobj, attr, buf,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800304 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
305}
306static ssize_t debug_cow_store(struct kobject *kobj,
307 struct kobj_attribute *attr,
308 const char *buf, size_t count)
309{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700310 return single_hugepage_flag_store(kobj, attr, buf, count,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800311 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
312}
313static struct kobj_attribute debug_cow_attr =
314 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
315#endif /* CONFIG_DEBUG_VM */
316
317static struct attribute *hugepage_attr[] = {
318 &enabled_attr.attr,
319 &defrag_attr.attr,
Kirill A. Shutemov79da5402012-12-12 13:51:12 -0800320 &use_zero_page_attr.attr,
Hugh Dickins49920d22016-12-12 16:44:50 -0800321 &hpage_pmd_size_attr.attr,
Kirill A. Shutemove496cf32016-07-26 15:26:35 -0700322#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
Kirill A. Shutemov5a6e75f2016-07-26 15:26:13 -0700323 &shmem_enabled_attr.attr,
324#endif
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800325#ifdef CONFIG_DEBUG_VM
326 &debug_cow_attr.attr,
327#endif
328 NULL,
329};
330
Arvind Yadav8aa95a22017-09-06 16:22:03 -0700331static const struct attribute_group hugepage_attr_group = {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800332 .attrs = hugepage_attr,
Andrea Arcangeliba761492011-01-13 15:46:58 -0800333};
334
Shaohua Li569e5592012-01-12 17:19:11 -0800335static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
336{
337 int err;
338
339 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
340 if (unlikely(!*hugepage_kobj)) {
Andrew Mortonae3a8c12014-06-04 16:06:58 -0700341 pr_err("failed to create transparent hugepage kobject\n");
Shaohua Li569e5592012-01-12 17:19:11 -0800342 return -ENOMEM;
343 }
344
345 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
346 if (err) {
Andrew Mortonae3a8c12014-06-04 16:06:58 -0700347 pr_err("failed to register transparent hugepage group\n");
Shaohua Li569e5592012-01-12 17:19:11 -0800348 goto delete_obj;
349 }
350
351 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
352 if (err) {
Andrew Mortonae3a8c12014-06-04 16:06:58 -0700353 pr_err("failed to register transparent hugepage group\n");
Shaohua Li569e5592012-01-12 17:19:11 -0800354 goto remove_hp_group;
355 }
356
357 return 0;
358
359remove_hp_group:
360 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
361delete_obj:
362 kobject_put(*hugepage_kobj);
363 return err;
364}
365
366static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
367{
368 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
369 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
370 kobject_put(hugepage_kobj);
371}
372#else
373static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
374{
375 return 0;
376}
377
378static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
379{
380}
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800381#endif /* CONFIG_SYSFS */
382
383static int __init hugepage_init(void)
384{
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800385 int err;
Shaohua Li569e5592012-01-12 17:19:11 -0800386 struct kobject *hugepage_kobj;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800387
Andrea Arcangeli4b7167b2011-01-13 15:47:09 -0800388 if (!has_transparent_hugepage()) {
389 transparent_hugepage_flags = 0;
Shaohua Li569e5592012-01-12 17:19:11 -0800390 return -EINVAL;
Andrea Arcangeli4b7167b2011-01-13 15:47:09 -0800391 }
392
Kirill A. Shutemovff20c2e2016-03-01 09:45:14 +0530393 /*
394 * hugepages can't be allocated by the buddy allocator
395 */
396 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
397 /*
398 * we use page->mapping and page->index in second tail page
399 * as list_head: assuming THP order >= 2
400 */
401 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
402
Shaohua Li569e5592012-01-12 17:19:11 -0800403 err = hugepage_init_sysfs(&hugepage_kobj);
404 if (err)
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700405 goto err_sysfs;
Andrea Arcangeliba761492011-01-13 15:46:58 -0800406
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700407 err = khugepaged_init();
Andrea Arcangeliba761492011-01-13 15:46:58 -0800408 if (err)
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700409 goto err_slab;
Andrea Arcangeliba761492011-01-13 15:46:58 -0800410
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700411 err = register_shrinker(&huge_zero_page_shrinker);
412 if (err)
413 goto err_hzp_shrinker;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800414 err = register_shrinker(&deferred_split_shrinker);
415 if (err)
416 goto err_split_shrinker;
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800417
Rik van Riel97562cd2011-01-13 15:47:12 -0800418 /*
419 * By default disable transparent hugepages on smaller systems,
420 * where the extra memory used could hurt more than TLB overhead
421 * is likely to save. The admin can still enable it through /sys.
422 */
Kirill A. Shutemov79553da2015-04-15 16:14:56 -0700423 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
Rik van Riel97562cd2011-01-13 15:47:12 -0800424 transparent_hugepage_flags = 0;
Kirill A. Shutemov79553da2015-04-15 16:14:56 -0700425 return 0;
426 }
Rik van Riel97562cd2011-01-13 15:47:12 -0800427
Kirill A. Shutemov79553da2015-04-15 16:14:56 -0700428 err = start_stop_khugepaged();
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700429 if (err)
430 goto err_khugepaged;
Andrea Arcangeliba761492011-01-13 15:46:58 -0800431
Shaohua Li569e5592012-01-12 17:19:11 -0800432 return 0;
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700433err_khugepaged:
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800434 unregister_shrinker(&deferred_split_shrinker);
435err_split_shrinker:
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700436 unregister_shrinker(&huge_zero_page_shrinker);
437err_hzp_shrinker:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -0700438 khugepaged_destroy();
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700439err_slab:
Shaohua Li569e5592012-01-12 17:19:11 -0800440 hugepage_exit_sysfs(hugepage_kobj);
Kirill A. Shutemov65ebb642015-04-15 16:14:20 -0700441err_sysfs:
Andrea Arcangeliba761492011-01-13 15:46:58 -0800442 return err;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800443}
Paul Gortmakera64fb3c2014-01-23 15:53:30 -0800444subsys_initcall(hugepage_init);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800445
446static int __init setup_transparent_hugepage(char *str)
447{
448 int ret = 0;
449 if (!str)
450 goto out;
451 if (!strcmp(str, "always")) {
452 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
453 &transparent_hugepage_flags);
454 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
455 &transparent_hugepage_flags);
456 ret = 1;
457 } else if (!strcmp(str, "madvise")) {
458 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
459 &transparent_hugepage_flags);
460 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
461 &transparent_hugepage_flags);
462 ret = 1;
463 } else if (!strcmp(str, "never")) {
464 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
465 &transparent_hugepage_flags);
466 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
467 &transparent_hugepage_flags);
468 ret = 1;
469 }
470out:
471 if (!ret)
Andrew Mortonae3a8c12014-06-04 16:06:58 -0700472 pr_warn("transparent_hugepage= cannot parse, ignored\n");
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800473 return ret;
474}
475__setup("transparent_hugepage=", setup_transparent_hugepage);
476
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800477pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800478{
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800479 if (likely(vma->vm_flags & VM_WRITE))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800480 pmd = pmd_mkwrite(pmd);
481 return pmd;
482}
483
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800484static inline struct list_head *page_deferred_list(struct page *page)
485{
Matthew Wilcoxfa3015b2018-06-07 17:08:42 -0700486 /* ->lru in the tail pages is occupied by compound_head. */
487 return &page[2].deferred_list;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800488}
489
490void prep_transhuge_page(struct page *page)
491{
492 /*
493 * we use page->mapping and page->indexlru in second tail page
494 * as list_head: assuming THP order >= 2
495 */
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800496
497 INIT_LIST_HEAD(page_deferred_list(page));
498 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
499}
500
Toshi Kani74d2fad2016-10-07 16:59:56 -0700501unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
502 loff_t off, unsigned long flags, unsigned long size)
503{
504 unsigned long addr;
505 loff_t off_end = off + len;
506 loff_t off_align = round_up(off, size);
507 unsigned long len_pad;
508
509 if (off_end <= off_align || (off_end - off_align) < size)
510 return 0;
511
512 len_pad = len + size;
513 if (len_pad < len || (off + len_pad) < off)
514 return 0;
515
516 addr = current->mm->get_unmapped_area(filp, 0, len_pad,
517 off >> PAGE_SHIFT, flags);
518 if (IS_ERR_VALUE(addr))
519 return 0;
520
521 addr += (off - addr) & (size - 1);
522 return addr;
523}
524
525unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
526 unsigned long len, unsigned long pgoff, unsigned long flags)
527{
528 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
529
530 if (addr)
531 goto out;
532 if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
533 goto out;
534
535 addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
536 if (addr)
537 return addr;
538
539 out:
540 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
541}
542EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
543
Jan Kara82b0f8c2016-12-14 15:06:58 -0800544static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700545 gfp_t gfp)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800546{
Jan Kara82b0f8c2016-12-14 15:06:58 -0800547 struct vm_area_struct *vma = vmf->vma;
Johannes Weiner00501b52014-08-08 14:19:20 -0700548 struct mem_cgroup *memcg;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800549 pgtable_t pgtable;
Jan Kara82b0f8c2016-12-14 15:06:58 -0800550 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Michal Hocko6b31d592017-08-18 15:16:15 -0700551 int ret = 0;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800552
Sasha Levin309381fea2014-01-23 15:52:54 -0800553 VM_BUG_ON_PAGE(!PageCompound(page), page);
Johannes Weiner00501b52014-08-08 14:19:20 -0700554
Tejun Heo2cf85582018-07-03 11:14:56 -0400555 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700556 put_page(page);
557 count_vm_event(THP_FAULT_FALLBACK);
558 return VM_FAULT_FALLBACK;
559 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800560
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700561 pgtable = pte_alloc_one(vma->vm_mm, haddr);
Johannes Weiner00501b52014-08-08 14:19:20 -0700562 if (unlikely(!pgtable)) {
Michal Hocko6b31d592017-08-18 15:16:15 -0700563 ret = VM_FAULT_OOM;
564 goto release;
Johannes Weiner00501b52014-08-08 14:19:20 -0700565 }
566
Huang Yingc79b57e2017-09-06 16:25:04 -0700567 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
Minchan Kim52f37622013-04-29 15:08:15 -0700568 /*
569 * The memory barrier inside __SetPageUptodate makes sure that
570 * clear_huge_page writes become visible before the set_pmd_at()
571 * write.
572 */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800573 __SetPageUptodate(page);
574
Jan Kara82b0f8c2016-12-14 15:06:58 -0800575 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
576 if (unlikely(!pmd_none(*vmf->pmd))) {
Michal Hocko6b31d592017-08-18 15:16:15 -0700577 goto unlock_release;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800578 } else {
579 pmd_t entry;
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700580
Michal Hocko6b31d592017-08-18 15:16:15 -0700581 ret = check_stable_address_space(vma->vm_mm);
582 if (ret)
583 goto unlock_release;
584
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700585 /* Deliver the page fault to userland */
586 if (userfaultfd_missing(vma)) {
587 int ret;
588
Jan Kara82b0f8c2016-12-14 15:06:58 -0800589 spin_unlock(vmf->ptl);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -0800590 mem_cgroup_cancel_charge(page, memcg, true);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700591 put_page(page);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700592 pte_free(vma->vm_mm, pgtable);
Jan Kara82b0f8c2016-12-14 15:06:58 -0800593 ret = handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700594 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
595 return ret;
596 }
597
Kirill A. Shutemov31223592013-09-12 15:14:01 -0700598 entry = mk_huge_pmd(page, vma->vm_page_prot);
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800599 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -0800600 page_add_new_anon_rmap(page, vma, haddr, true);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -0800601 mem_cgroup_commit_charge(page, memcg, false, true);
Johannes Weiner00501b52014-08-08 14:19:20 -0700602 lru_cache_add_active_or_unevictable(page, vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -0800603 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
604 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700605 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -0800606 mm_inc_nr_ptes(vma->vm_mm);
Jan Kara82b0f8c2016-12-14 15:06:58 -0800607 spin_unlock(vmf->ptl);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700608 count_vm_event(THP_FAULT_ALLOC);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800609 }
610
David Rientjesaa2e8782012-05-29 15:06:17 -0700611 return 0;
Michal Hocko6b31d592017-08-18 15:16:15 -0700612unlock_release:
613 spin_unlock(vmf->ptl);
614release:
615 if (pgtable)
616 pte_free(vma->vm_mm, pgtable);
617 mem_cgroup_cancel_charge(page, memcg, true);
618 put_page(page);
619 return ret;
620
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800621}
622
Mel Gorman444eb2a42016-03-17 14:19:23 -0700623/*
David Rientjes21440d72017-02-22 15:45:49 -0800624 * always: directly stall for all thp allocations
625 * defer: wake kswapd and fail if not immediately available
626 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
627 * fail if not immediately available
628 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
629 * available
630 * never: never stall for any thp allocation
Mel Gorman444eb2a42016-03-17 14:19:23 -0700631 */
632static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -0800633{
David Rientjes21440d72017-02-22 15:45:49 -0800634 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
Mel Gorman444eb2a42016-03-17 14:19:23 -0700635
David Rientjes21440d72017-02-22 15:45:49 -0800636 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
Vlastimil Babka25160352016-07-28 15:49:25 -0700637 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
David Rientjes21440d72017-02-22 15:45:49 -0800638 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
639 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
640 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
641 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
642 __GFP_KSWAPD_RECLAIM);
643 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
644 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
645 0);
Vlastimil Babka25160352016-07-28 15:49:25 -0700646 return GFP_TRANSHUGE_LIGHT;
Mel Gorman444eb2a42016-03-17 14:19:23 -0700647}
648
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -0800649/* Caller must hold page table lock. */
Kirill A. Shutemovd295e342015-09-08 14:59:34 -0700650static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800651 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
Kirill A. Shutemov5918d102013-04-29 15:08:44 -0700652 struct page *zero_page)
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800653{
654 pmd_t entry;
Andrew Morton7c414162015-09-08 14:58:43 -0700655 if (!pmd_none(*pmd))
656 return false;
Kirill A. Shutemov5918d102013-04-29 15:08:44 -0700657 entry = mk_pmd(zero_page, vma->vm_page_prot);
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800658 entry = pmd_mkhuge(entry);
Matthew Wilcox12c9d702016-02-02 16:57:57 -0800659 if (pgtable)
660 pgtable_trans_huge_deposit(mm, pmd, pgtable);
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800661 set_pmd_at(mm, haddr, pmd, entry);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -0800662 mm_inc_nr_ptes(mm);
Andrew Morton7c414162015-09-08 14:58:43 -0700663 return true;
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800664}
665
Jan Kara82b0f8c2016-12-14 15:06:58 -0800666int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800667{
Jan Kara82b0f8c2016-12-14 15:06:58 -0800668 struct vm_area_struct *vma = vmf->vma;
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -0800669 gfp_t gfp;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800670 struct page *page;
Jan Kara82b0f8c2016-12-14 15:06:58 -0800671 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800672
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700673 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
Kirill A. Shutemovc0292552013-09-12 15:14:05 -0700674 return VM_FAULT_FALLBACK;
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700675 if (unlikely(anon_vma_prepare(vma)))
676 return VM_FAULT_OOM;
David Rientjes6d50e602014-10-29 14:50:31 -0700677 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700678 return VM_FAULT_OOM;
Jan Kara82b0f8c2016-12-14 15:06:58 -0800679 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700680 !mm_forbids_zeropage(vma->vm_mm) &&
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700681 transparent_hugepage_use_zero_page()) {
682 pgtable_t pgtable;
683 struct page *zero_page;
684 bool set;
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700685 int ret;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700686 pgtable = pte_alloc_one(vma->vm_mm, haddr);
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700687 if (unlikely(!pgtable))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800688 return VM_FAULT_OOM;
Aaron Lu6fcb52a2016-10-07 17:00:08 -0700689 zero_page = mm_get_huge_zero_page(vma->vm_mm);
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700690 if (unlikely(!zero_page)) {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700691 pte_free(vma->vm_mm, pgtable);
Andi Kleen81ab4202011-04-14 15:22:06 -0700692 count_vm_event(THP_FAULT_FALLBACK);
Kirill A. Shutemovc0292552013-09-12 15:14:05 -0700693 return VM_FAULT_FALLBACK;
Andi Kleen81ab4202011-04-14 15:22:06 -0700694 }
Jan Kara82b0f8c2016-12-14 15:06:58 -0800695 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700696 ret = 0;
697 set = false;
Jan Kara82b0f8c2016-12-14 15:06:58 -0800698 if (pmd_none(*vmf->pmd)) {
Michal Hocko6b31d592017-08-18 15:16:15 -0700699 ret = check_stable_address_space(vma->vm_mm);
700 if (ret) {
701 spin_unlock(vmf->ptl);
702 } else if (userfaultfd_missing(vma)) {
Jan Kara82b0f8c2016-12-14 15:06:58 -0800703 spin_unlock(vmf->ptl);
704 ret = handle_userfault(vmf, VM_UFFD_MISSING);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700705 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
706 } else {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700707 set_huge_zero_page(pgtable, vma->vm_mm, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -0800708 haddr, vmf->pmd, zero_page);
709 spin_unlock(vmf->ptl);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700710 set = true;
711 }
712 } else
Jan Kara82b0f8c2016-12-14 15:06:58 -0800713 spin_unlock(vmf->ptl);
Aaron Lu6fcb52a2016-10-07 17:00:08 -0700714 if (!set)
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -0700715 pte_free(vma->vm_mm, pgtable);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700716 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800717 }
Mel Gorman444eb2a42016-03-17 14:19:23 -0700718 gfp = alloc_hugepage_direct_gfpmask(vma);
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -0800719 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700720 if (unlikely(!page)) {
721 count_vm_event(THP_FAULT_FALLBACK);
Kirill A. Shutemovc0292552013-09-12 15:14:05 -0700722 return VM_FAULT_FALLBACK;
Kirill A. Shutemov128ec032013-09-12 15:14:03 -0700723 }
Kirill A. Shutemov9a982252016-01-15 16:54:17 -0800724 prep_transhuge_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -0800725 return __do_huge_pmd_anonymous_page(vmf, page, gfp);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800726}
727
Matthew Wilcoxae18d6d2015-09-08 14:59:14 -0700728static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700729 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
730 pgtable_t pgtable)
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700731{
732 struct mm_struct *mm = vma->vm_mm;
733 pmd_t entry;
734 spinlock_t *ptl;
735
736 ptl = pmd_lock(mm, pmd);
Dan Williamsf25748e32016-01-15 16:56:43 -0800737 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
738 if (pfn_t_devmap(pfn))
739 entry = pmd_mkdevmap(entry);
Ross Zwisler01871e52016-01-15 16:56:02 -0800740 if (write) {
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800741 entry = pmd_mkyoung(pmd_mkdirty(entry));
742 entry = maybe_pmd_mkwrite(entry, vma);
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700743 }
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700744
745 if (pgtable) {
746 pgtable_trans_huge_deposit(mm, pmd, pgtable);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -0800747 mm_inc_nr_ptes(mm);
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700748 }
749
Ross Zwisler01871e52016-01-15 16:56:02 -0800750 set_pmd_at(mm, addr, pmd, entry);
751 update_mmu_cache_pmd(vma, addr, pmd);
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700752 spin_unlock(ptl);
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700753}
754
755int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
Dan Williamsf25748e32016-01-15 16:56:43 -0800756 pmd_t *pmd, pfn_t pfn, bool write)
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700757{
758 pgprot_t pgprot = vma->vm_page_prot;
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700759 pgtable_t pgtable = NULL;
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700760 /*
761 * If we had pmd_special, we could avoid all these restrictions,
762 * but we need to be consistent with PTEs and architectures that
763 * can't support a 'special' bit.
764 */
Dave Jiange1fb4a02018-08-17 15:43:40 -0700765 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
766 !pfn_t_devmap(pfn));
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700767 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
768 (VM_PFNMAP|VM_MIXEDMAP));
769 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700770
771 if (addr < vma->vm_start || addr >= vma->vm_end)
772 return VM_FAULT_SIGBUS;
Borislav Petkov308a0472016-10-26 19:43:43 +0200773
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700774 if (arch_needs_pgtable_deposit()) {
775 pgtable = pte_alloc_one(vma->vm_mm, addr);
776 if (!pgtable)
777 return VM_FAULT_OOM;
778 }
779
Borislav Petkov308a0472016-10-26 19:43:43 +0200780 track_pfn_insert(vma, &pgprot, pfn);
781
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -0700782 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
Matthew Wilcoxae18d6d2015-09-08 14:59:14 -0700783 return VM_FAULT_NOPAGE;
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700784}
Dan Williamsdee41072016-05-14 12:20:44 -0700785EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
Matthew Wilcox5cad4652015-09-08 14:58:54 -0700786
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800787#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800788static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800789{
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800790 if (likely(vma->vm_flags & VM_WRITE))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800791 pud = pud_mkwrite(pud);
792 return pud;
793}
794
795static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
796 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
797{
798 struct mm_struct *mm = vma->vm_mm;
799 pud_t entry;
800 spinlock_t *ptl;
801
802 ptl = pud_lock(mm, pud);
803 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
804 if (pfn_t_devmap(pfn))
805 entry = pud_mkdevmap(entry);
806 if (write) {
Linus Torvaldsf55e1012017-11-29 09:01:01 -0800807 entry = pud_mkyoung(pud_mkdirty(entry));
808 entry = maybe_pud_mkwrite(entry, vma);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800809 }
810 set_pud_at(mm, addr, pud, entry);
811 update_mmu_cache_pud(vma, addr, pud);
812 spin_unlock(ptl);
813}
814
815int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
816 pud_t *pud, pfn_t pfn, bool write)
817{
818 pgprot_t pgprot = vma->vm_page_prot;
819 /*
820 * If we had pud_special, we could avoid all these restrictions,
821 * but we need to be consistent with PTEs and architectures that
822 * can't support a 'special' bit.
823 */
824 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
825 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
826 (VM_PFNMAP|VM_MIXEDMAP));
827 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
828 BUG_ON(!pfn_t_devmap(pfn));
829
830 if (addr < vma->vm_start || addr >= vma->vm_end)
831 return VM_FAULT_SIGBUS;
832
833 track_pfn_insert(vma, &pgprot, pfn);
834
835 insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
836 return VM_FAULT_NOPAGE;
837}
838EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
839#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
840
Dan Williams3565fce2016-01-15 16:56:55 -0800841static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300842 pmd_t *pmd, int flags)
Dan Williams3565fce2016-01-15 16:56:55 -0800843{
844 pmd_t _pmd;
845
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300846 _pmd = pmd_mkyoung(*pmd);
847 if (flags & FOLL_WRITE)
848 _pmd = pmd_mkdirty(_pmd);
Dan Williams3565fce2016-01-15 16:56:55 -0800849 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300850 pmd, _pmd, flags & FOLL_WRITE))
Dan Williams3565fce2016-01-15 16:56:55 -0800851 update_mmu_cache_pmd(vma, addr, pmd);
852}
853
854struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
855 pmd_t *pmd, int flags)
856{
857 unsigned long pfn = pmd_pfn(*pmd);
858 struct mm_struct *mm = vma->vm_mm;
859 struct dev_pagemap *pgmap;
860 struct page *page;
861
862 assert_spin_locked(pmd_lockptr(mm, pmd));
863
Keno Fischer8310d482017-01-24 15:17:48 -0800864 /*
865 * When we COW a devmap PMD entry, we split it into PTEs, so we should
866 * not be in this function with `flags & FOLL_COW` set.
867 */
868 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
869
Linus Torvaldsf6f37322017-12-15 18:53:22 -0800870 if (flags & FOLL_WRITE && !pmd_write(*pmd))
Dan Williams3565fce2016-01-15 16:56:55 -0800871 return NULL;
872
873 if (pmd_present(*pmd) && pmd_devmap(*pmd))
874 /* pass */;
875 else
876 return NULL;
877
878 if (flags & FOLL_TOUCH)
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300879 touch_pmd(vma, addr, pmd, flags);
Dan Williams3565fce2016-01-15 16:56:55 -0800880
881 /*
882 * device mapped pages can only be returned if the
883 * caller will manage the page reference count.
884 */
885 if (!(flags & FOLL_GET))
886 return ERR_PTR(-EEXIST);
887
888 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
889 pgmap = get_dev_pagemap(pfn, NULL);
890 if (!pgmap)
891 return ERR_PTR(-EFAULT);
892 page = pfn_to_page(pfn);
893 get_page(page);
894 put_dev_pagemap(pgmap);
895
896 return page;
897}
898
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800899int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
900 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
901 struct vm_area_struct *vma)
902{
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -0800903 spinlock_t *dst_ptl, *src_ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800904 struct page *src_page;
905 pmd_t pmd;
Matthew Wilcox12c9d702016-02-02 16:57:57 -0800906 pgtable_t pgtable = NULL;
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -0700907 int ret = -ENOMEM;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800908
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -0700909 /* Skip if can be re-fill on fault */
910 if (!vma_is_anonymous(vma))
911 return 0;
912
913 pgtable = pte_alloc_one(dst_mm, addr);
914 if (unlikely(!pgtable))
915 goto out;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800916
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -0800917 dst_ptl = pmd_lock(dst_mm, dst_pmd);
918 src_ptl = pmd_lockptr(src_mm, src_pmd);
919 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800920
921 ret = -EAGAIN;
922 pmd = *src_pmd;
Zi Yan84c3fc42017-09-08 16:11:01 -0700923
924#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
925 if (unlikely(is_swap_pmd(pmd))) {
926 swp_entry_t entry = pmd_to_swp_entry(pmd);
927
928 VM_BUG_ON(!is_pmd_migration_entry(pmd));
929 if (is_write_migration_entry(entry)) {
930 make_migration_entry_read(&entry);
931 pmd = swp_entry_to_pmd(entry);
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -0700932 if (pmd_swp_soft_dirty(*src_pmd))
933 pmd = pmd_swp_mksoft_dirty(pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -0700934 set_pmd_at(src_mm, addr, src_pmd, pmd);
935 }
Zi Yandd8a67f2017-11-02 15:59:47 -0700936 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemovaf5b0f62017-11-15 17:35:40 -0800937 mm_inc_nr_ptes(dst_mm);
Zi Yandd8a67f2017-11-02 15:59:47 -0700938 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
Zi Yan84c3fc42017-09-08 16:11:01 -0700939 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
940 ret = 0;
941 goto out_unlock;
942 }
943#endif
944
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -0700945 if (unlikely(!pmd_trans_huge(pmd))) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800946 pte_free(dst_mm, pgtable);
947 goto out_unlock;
948 }
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800949 /*
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -0800950 * When page table lock is held, the huge zero pmd should not be
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800951 * under splitting since we don't split the page itself, only pmd to
952 * a page table.
953 */
954 if (is_huge_zero_pmd(pmd)) {
Kirill A. Shutemov5918d102013-04-29 15:08:44 -0700955 struct page *zero_page;
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -0800956 /*
957 * get_huge_zero_page() will never allocate a new page here,
958 * since we already have a zero page to copy. It just takes a
959 * reference.
960 */
Aaron Lu6fcb52a2016-10-07 17:00:08 -0700961 zero_page = mm_get_huge_zero_page(dst_mm);
Andrea Arcangeli6b251fc2015-09-04 15:46:20 -0700962 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
Kirill A. Shutemov5918d102013-04-29 15:08:44 -0700963 zero_page);
Kirill A. Shutemovfc9fe822012-12-12 13:50:51 -0800964 ret = 0;
965 goto out_unlock;
966 }
Mel Gormande466bd2013-12-18 17:08:42 -0800967
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -0700968 src_page = pmd_page(pmd);
969 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
970 get_page(src_page);
971 page_dup_rmap(src_page, true);
972 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -0800973 mm_inc_nr_ptes(dst_mm);
Kirill A. Shutemov628d47c2016-07-26 15:25:42 -0700974 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800975
976 pmdp_set_wrprotect(src_mm, addr, src_pmd);
977 pmd = pmd_mkold(pmd_wrprotect(pmd));
978 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800979
980 ret = 0;
981out_unlock:
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -0800982 spin_unlock(src_ptl);
983 spin_unlock(dst_ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800984out:
985 return ret;
986}
987
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800988#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
989static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300990 pud_t *pud, int flags)
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800991{
992 pud_t _pud;
993
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300994 _pud = pud_mkyoung(*pud);
995 if (flags & FOLL_WRITE)
996 _pud = pud_mkdirty(_pud);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800997 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
Kirill A. Shutemova8f97362017-11-27 06:21:25 +0300998 pud, _pud, flags & FOLL_WRITE))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -0800999 update_mmu_cache_pud(vma, addr, pud);
1000}
1001
1002struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1003 pud_t *pud, int flags)
1004{
1005 unsigned long pfn = pud_pfn(*pud);
1006 struct mm_struct *mm = vma->vm_mm;
1007 struct dev_pagemap *pgmap;
1008 struct page *page;
1009
1010 assert_spin_locked(pud_lockptr(mm, pud));
1011
Linus Torvaldsf6f37322017-12-15 18:53:22 -08001012 if (flags & FOLL_WRITE && !pud_write(*pud))
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001013 return NULL;
1014
1015 if (pud_present(*pud) && pud_devmap(*pud))
1016 /* pass */;
1017 else
1018 return NULL;
1019
1020 if (flags & FOLL_TOUCH)
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001021 touch_pud(vma, addr, pud, flags);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001022
1023 /*
1024 * device mapped pages can only be returned if the
1025 * caller will manage the page reference count.
1026 */
1027 if (!(flags & FOLL_GET))
1028 return ERR_PTR(-EEXIST);
1029
1030 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1031 pgmap = get_dev_pagemap(pfn, NULL);
1032 if (!pgmap)
1033 return ERR_PTR(-EFAULT);
1034 page = pfn_to_page(pfn);
1035 get_page(page);
1036 put_dev_pagemap(pgmap);
1037
1038 return page;
1039}
1040
1041int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1042 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1043 struct vm_area_struct *vma)
1044{
1045 spinlock_t *dst_ptl, *src_ptl;
1046 pud_t pud;
1047 int ret;
1048
1049 dst_ptl = pud_lock(dst_mm, dst_pud);
1050 src_ptl = pud_lockptr(src_mm, src_pud);
1051 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1052
1053 ret = -EAGAIN;
1054 pud = *src_pud;
1055 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1056 goto out_unlock;
1057
1058 /*
1059 * When page table lock is held, the huge zero pud should not be
1060 * under splitting since we don't split the page itself, only pud to
1061 * a page table.
1062 */
1063 if (is_huge_zero_pud(pud)) {
1064 /* No huge zero pud yet */
1065 }
1066
1067 pudp_set_wrprotect(src_mm, addr, src_pud);
1068 pud = pud_mkold(pud_wrprotect(pud));
1069 set_pud_at(dst_mm, addr, dst_pud, pud);
1070
1071 ret = 0;
1072out_unlock:
1073 spin_unlock(src_ptl);
1074 spin_unlock(dst_ptl);
1075 return ret;
1076}
1077
1078void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1079{
1080 pud_t entry;
1081 unsigned long haddr;
1082 bool write = vmf->flags & FAULT_FLAG_WRITE;
1083
1084 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1085 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1086 goto unlock;
1087
1088 entry = pud_mkyoung(orig_pud);
1089 if (write)
1090 entry = pud_mkdirty(entry);
1091 haddr = vmf->address & HPAGE_PUD_MASK;
1092 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
1093 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1094
1095unlock:
1096 spin_unlock(vmf->ptl);
1097}
1098#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1099
Jan Kara82b0f8c2016-12-14 15:06:58 -08001100void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
Will Deacona1dd4502012-12-11 16:01:27 -08001101{
1102 pmd_t entry;
1103 unsigned long haddr;
Minchan Kim20f664a2017-01-10 16:57:51 -08001104 bool write = vmf->flags & FAULT_FLAG_WRITE;
Will Deacona1dd4502012-12-11 16:01:27 -08001105
Jan Kara82b0f8c2016-12-14 15:06:58 -08001106 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1107 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
Will Deacona1dd4502012-12-11 16:01:27 -08001108 goto unlock;
1109
1110 entry = pmd_mkyoung(orig_pmd);
Minchan Kim20f664a2017-01-10 16:57:51 -08001111 if (write)
1112 entry = pmd_mkdirty(entry);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001113 haddr = vmf->address & HPAGE_PMD_MASK;
Minchan Kim20f664a2017-01-10 16:57:51 -08001114 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
Jan Kara82b0f8c2016-12-14 15:06:58 -08001115 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
Will Deacona1dd4502012-12-11 16:01:27 -08001116
1117unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08001118 spin_unlock(vmf->ptl);
Will Deacona1dd4502012-12-11 16:01:27 -08001119}
1120
Jan Kara82b0f8c2016-12-14 15:06:58 -08001121static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001122 struct page *page)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001123{
Jan Kara82b0f8c2016-12-14 15:06:58 -08001124 struct vm_area_struct *vma = vmf->vma;
1125 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Johannes Weiner00501b52014-08-08 14:19:20 -07001126 struct mem_cgroup *memcg;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001127 pgtable_t pgtable;
1128 pmd_t _pmd;
1129 int ret = 0, i;
1130 struct page **pages;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001131 unsigned long mmun_start; /* For mmu_notifiers */
1132 unsigned long mmun_end; /* For mmu_notifiers */
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001133
Kees Cook6da2ec52018-06-12 13:55:00 -07001134 pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
1135 GFP_KERNEL);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001136 if (unlikely(!pages)) {
1137 ret |= VM_FAULT_OOM;
1138 goto out;
1139 }
1140
1141 for (i = 0; i < HPAGE_PMD_NR; i++) {
Michal Hocko41b61672017-01-10 16:57:42 -08001142 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -08001143 vmf->address, page_to_nid(page));
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001144 if (unlikely(!pages[i] ||
Tejun Heo2cf85582018-07-03 11:14:56 -04001145 mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001146 GFP_KERNEL, &memcg, false))) {
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001147 if (pages[i])
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001148 put_page(pages[i]);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001149 while (--i >= 0) {
Johannes Weiner00501b52014-08-08 14:19:20 -07001150 memcg = (void *)page_private(pages[i]);
1151 set_page_private(pages[i], 0);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001152 mem_cgroup_cancel_charge(pages[i], memcg,
1153 false);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001154 put_page(pages[i]);
1155 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001156 kfree(pages);
1157 ret |= VM_FAULT_OOM;
1158 goto out;
1159 }
Johannes Weiner00501b52014-08-08 14:19:20 -07001160 set_page_private(pages[i], (unsigned long)memcg);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001161 }
1162
1163 for (i = 0; i < HPAGE_PMD_NR; i++) {
1164 copy_user_highpage(pages[i], page + i,
Hillf Danton0089e482011-10-31 17:09:38 -07001165 haddr + PAGE_SIZE * i, vma);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001166 __SetPageUptodate(pages[i]);
1167 cond_resched();
1168 }
1169
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001170 mmun_start = haddr;
1171 mmun_end = haddr + HPAGE_PMD_SIZE;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001172 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001173
Jan Kara82b0f8c2016-12-14 15:06:58 -08001174 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1175 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001176 goto out_free_pages;
Sasha Levin309381fea2014-01-23 15:52:54 -08001177 VM_BUG_ON_PAGE(!PageHead(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001178
Jérôme Glisse0f108512017-11-15 17:34:07 -08001179 /*
1180 * Leave pmd empty until pte is filled note we must notify here as
1181 * concurrent CPU thread might write to new page before the call to
1182 * mmu_notifier_invalidate_range_end() happens which can lead to a
1183 * device seeing memory write in different order than CPU.
1184 *
Mike Rapoportad56b732018-03-21 21:22:47 +02001185 * See Documentation/vm/mmu_notifier.rst
Jérôme Glisse0f108512017-11-15 17:34:07 -08001186 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001187 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001188
Jan Kara82b0f8c2016-12-14 15:06:58 -08001189 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001190 pmd_populate(vma->vm_mm, &_pmd, pgtable);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001191
1192 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001193 pte_t entry;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001194 entry = mk_pte(pages[i], vma->vm_page_prot);
1195 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
Johannes Weiner00501b52014-08-08 14:19:20 -07001196 memcg = (void *)page_private(pages[i]);
1197 set_page_private(pages[i], 0);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001198 page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001199 mem_cgroup_commit_charge(pages[i], memcg, false, false);
Johannes Weiner00501b52014-08-08 14:19:20 -07001200 lru_cache_add_active_or_unevictable(pages[i], vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001201 vmf->pte = pte_offset_map(&_pmd, haddr);
1202 VM_BUG_ON(!pte_none(*vmf->pte));
1203 set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
1204 pte_unmap(vmf->pte);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001205 }
1206 kfree(pages);
1207
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001208 smp_wmb(); /* make pte visible before pmd */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001209 pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001210 page_remove_rmap(page, true);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001211 spin_unlock(vmf->ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001212
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08001213 /*
1214 * No need to double call mmu_notifier->invalidate_range() callback as
1215 * the above pmdp_huge_clear_flush_notify() did already call it.
1216 */
1217 mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
1218 mmun_end);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001219
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001220 ret |= VM_FAULT_WRITE;
1221 put_page(page);
1222
1223out:
1224 return ret;
1225
1226out_free_pages:
Jan Kara82b0f8c2016-12-14 15:06:58 -08001227 spin_unlock(vmf->ptl);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001228 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001229 for (i = 0; i < HPAGE_PMD_NR; i++) {
Johannes Weiner00501b52014-08-08 14:19:20 -07001230 memcg = (void *)page_private(pages[i]);
1231 set_page_private(pages[i], 0);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001232 mem_cgroup_cancel_charge(pages[i], memcg, false);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001233 put_page(pages[i]);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001234 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001235 kfree(pages);
1236 goto out;
1237}
1238
Jan Kara82b0f8c2016-12-14 15:06:58 -08001239int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001240{
Jan Kara82b0f8c2016-12-14 15:06:58 -08001241 struct vm_area_struct *vma = vmf->vma;
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001242 struct page *page = NULL, *new_page;
Johannes Weiner00501b52014-08-08 14:19:20 -07001243 struct mem_cgroup *memcg;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001244 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001245 unsigned long mmun_start; /* For mmu_notifiers */
1246 unsigned long mmun_end; /* For mmu_notifiers */
Michal Hocko3b363692015-04-15 16:13:29 -07001247 gfp_t huge_gfp; /* for allocation and charge */
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001248 int ret = 0;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001249
Jan Kara82b0f8c2016-12-14 15:06:58 -08001250 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
Sasha Levin81d1b092014-10-09 15:28:10 -07001251 VM_BUG_ON_VMA(!vma->anon_vma, vma);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001252 if (is_huge_zero_pmd(orig_pmd))
1253 goto alloc;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001254 spin_lock(vmf->ptl);
1255 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001256 goto out_unlock;
1257
1258 page = pmd_page(orig_pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08001259 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
Kirill A. Shutemov1f25fe22016-01-15 16:52:24 -08001260 /*
1261 * We can only reuse the page if nobody else maps the huge page or it's
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07001262 * part.
Kirill A. Shutemov1f25fe22016-01-15 16:52:24 -08001263 */
Huang Yingba3c4ce2017-09-06 16:22:19 -07001264 if (!trylock_page(page)) {
1265 get_page(page);
1266 spin_unlock(vmf->ptl);
1267 lock_page(page);
1268 spin_lock(vmf->ptl);
1269 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1270 unlock_page(page);
1271 put_page(page);
1272 goto out_unlock;
1273 }
1274 put_page(page);
1275 }
1276 if (reuse_swap_page(page, NULL)) {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001277 pmd_t entry;
1278 entry = pmd_mkyoung(orig_pmd);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08001279 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001280 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1281 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001282 ret |= VM_FAULT_WRITE;
Huang Yingba3c4ce2017-09-06 16:22:19 -07001283 unlock_page(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001284 goto out_unlock;
1285 }
Huang Yingba3c4ce2017-09-06 16:22:19 -07001286 unlock_page(page);
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001287 get_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001288 spin_unlock(vmf->ptl);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001289alloc:
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001290 if (transparent_hugepage_enabled(vma) &&
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -08001291 !transparent_hugepage_debug_cow()) {
Mel Gorman444eb2a42016-03-17 14:19:23 -07001292 huge_gfp = alloc_hugepage_direct_gfpmask(vma);
Michal Hocko3b363692015-04-15 16:13:29 -07001293 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
Aneesh Kumar K.V077fcf12015-02-11 15:27:12 -08001294 } else
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001295 new_page = NULL;
1296
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08001297 if (likely(new_page)) {
1298 prep_transhuge_page(new_page);
1299 } else {
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001300 if (!page) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08001301 split_huge_pmd(vma, vmf->pmd, vmf->address);
Kirill A. Shutemove9b71ca2014-04-03 14:48:17 -07001302 ret |= VM_FAULT_FALLBACK;
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001303 } else {
Jan Kara82b0f8c2016-12-14 15:06:58 -08001304 ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001305 if (ret & VM_FAULT_OOM) {
Jan Kara82b0f8c2016-12-14 15:06:58 -08001306 split_huge_pmd(vma, vmf->pmd, vmf->address);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001307 ret |= VM_FAULT_FALLBACK;
1308 }
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001309 put_page(page);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001310 }
David Rientjes17766dd2013-09-12 15:14:06 -07001311 count_vm_event(THP_FAULT_FALLBACK);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001312 goto out;
1313 }
1314
Tejun Heo2cf85582018-07-03 11:14:56 -04001315 if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
Michal Hocko2a70f6a2018-04-10 16:29:30 -07001316 huge_gfp, &memcg, true))) {
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001317 put_page(new_page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001318 split_huge_pmd(vma, vmf->pmd, vmf->address);
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001319 if (page)
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001320 put_page(page);
Kirill A. Shutemov9845cbb2014-02-25 15:01:42 -08001321 ret |= VM_FAULT_FALLBACK;
David Rientjes17766dd2013-09-12 15:14:06 -07001322 count_vm_event(THP_FAULT_FALLBACK);
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001323 goto out;
1324 }
1325
David Rientjes17766dd2013-09-12 15:14:06 -07001326 count_vm_event(THP_FAULT_ALLOC);
1327
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001328 if (!page)
Huang Yingc79b57e2017-09-06 16:25:04 -07001329 clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001330 else
Huang Yingc9f4cd72018-08-17 15:45:49 -07001331 copy_user_huge_page(new_page, page, vmf->address,
1332 vma, HPAGE_PMD_NR);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001333 __SetPageUptodate(new_page);
1334
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001335 mmun_start = haddr;
1336 mmun_end = haddr + HPAGE_PMD_SIZE;
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001337 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001338
Jan Kara82b0f8c2016-12-14 15:06:58 -08001339 spin_lock(vmf->ptl);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001340 if (page)
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001341 put_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001342 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1343 spin_unlock(vmf->ptl);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001344 mem_cgroup_cancel_charge(new_page, memcg, true);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001345 put_page(new_page);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001346 goto out_mn;
Andrea Arcangelib9bbfbe2011-01-13 15:46:57 -08001347 } else {
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001348 pmd_t entry;
Kirill A. Shutemov31223592013-09-12 15:14:01 -07001349 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
Linus Torvaldsf55e1012017-11-29 09:01:01 -08001350 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001351 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001352 page_add_new_anon_rmap(new_page, vma, haddr, true);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08001353 mem_cgroup_commit_charge(new_page, memcg, false, true);
Johannes Weiner00501b52014-08-08 14:19:20 -07001354 lru_cache_add_active_or_unevictable(new_page, vma);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001355 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
1356 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
Hugh Dickinseecc1e42014-01-12 01:25:21 -08001357 if (!page) {
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001358 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
Kirill A. Shutemov97ae1742012-12-12 13:51:06 -08001359 } else {
Sasha Levin309381fea2014-01-23 15:52:54 -08001360 VM_BUG_ON_PAGE(!PageHead(page), page);
Kirill A. Shutemovd281ee62016-01-15 16:52:16 -08001361 page_remove_rmap(page, true);
Kirill A. Shutemov93b47962012-12-12 13:50:54 -08001362 put_page(page);
1363 }
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001364 ret |= VM_FAULT_WRITE;
1365 }
Jan Kara82b0f8c2016-12-14 15:06:58 -08001366 spin_unlock(vmf->ptl);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001367out_mn:
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08001368 /*
1369 * No need to double call mmu_notifier->invalidate_range() callback as
1370 * the above pmdp_huge_clear_flush_notify() did already call it.
1371 */
1372 mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
1373 mmun_end);
Sagi Grimberg2ec74c32012-10-08 16:33:33 -07001374out:
1375 return ret;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001376out_unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08001377 spin_unlock(vmf->ptl);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001378 return ret;
1379}
1380
Keno Fischer8310d482017-01-24 15:17:48 -08001381/*
1382 * FOLL_FORCE can write to even unwritable pmd's, but only
1383 * after we've gone through a COW cycle and they are dirty.
1384 */
1385static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1386{
Linus Torvaldsf6f37322017-12-15 18:53:22 -08001387 return pmd_write(pmd) ||
Keno Fischer8310d482017-01-24 15:17:48 -08001388 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1389}
1390
David Rientjesb676b292012-10-08 16:34:03 -07001391struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001392 unsigned long addr,
1393 pmd_t *pmd,
1394 unsigned int flags)
1395{
David Rientjesb676b292012-10-08 16:34:03 -07001396 struct mm_struct *mm = vma->vm_mm;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001397 struct page *page = NULL;
1398
Kirill A. Shutemovc4088eb2013-11-14 14:31:04 -08001399 assert_spin_locked(pmd_lockptr(mm, pmd));
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001400
Keno Fischer8310d482017-01-24 15:17:48 -08001401 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001402 goto out;
1403
Kirill A. Shutemov85facf22013-02-04 14:28:42 -08001404 /* Avoid dumping huge zero page */
1405 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1406 return ERR_PTR(-EFAULT);
1407
Mel Gorman2b4847e2013-12-18 17:08:32 -08001408 /* Full NUMA hinting faults to serialise migration in fault paths */
Mel Gorman8a0516e2015-02-12 14:58:22 -08001409 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
Mel Gorman2b4847e2013-12-18 17:08:32 -08001410 goto out;
1411
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001412 page = pmd_page(*pmd);
Dan Williamsca120cf2016-09-03 10:38:03 -07001413 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
Dan Williams3565fce2016-01-15 16:56:55 -08001414 if (flags & FOLL_TOUCH)
Kirill A. Shutemova8f97362017-11-27 06:21:25 +03001415 touch_pmd(vma, addr, pmd, flags);
Eric B Munsonde60f5f2015-11-05 18:51:36 -08001416 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001417 /*
1418 * We don't mlock() pte-mapped THPs. This way we can avoid
1419 * leaking mlocked pages into non-VM_LOCKED VMAs.
1420 *
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001421 * For anon THP:
1422 *
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001423 * In most cases the pmd is the only mapping of the page as we
1424 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1425 * writable private mappings in populate_vma_page_range().
1426 *
1427 * The only scenario when we have the page shared here is if we
1428 * mlocking read-only mapping shared over fork(). We skip
1429 * mlocking such pages.
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001430 *
1431 * For file THP:
1432 *
1433 * We can expect PageDoubleMap() to be stable under page lock:
1434 * for file pages we set it in page_add_file_rmap(), which
1435 * requires page to be locked.
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08001436 */
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001437
1438 if (PageAnon(page) && compound_mapcount(page) != 1)
1439 goto skip_mlock;
1440 if (PageDoubleMap(page) || !page->mapping)
1441 goto skip_mlock;
1442 if (!trylock_page(page))
1443 goto skip_mlock;
1444 lru_add_drain();
1445 if (page->mapping && !PageDoubleMap(page))
1446 mlock_vma_page(page);
1447 unlock_page(page);
David Rientjesb676b292012-10-08 16:34:03 -07001448 }
Kirill A. Shutemov9a73f612016-07-26 15:25:53 -07001449skip_mlock:
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001450 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
Dan Williamsca120cf2016-09-03 10:38:03 -07001451 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001452 if (flags & FOLL_GET)
Kirill A. Shutemovddc58f22016-01-15 16:52:56 -08001453 get_page(page);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001454
1455out:
1456 return page;
1457}
1458
Mel Gormand10e63f2012-10-25 14:16:31 +02001459/* NUMA hinting page fault entry point for trans huge pmds */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001460int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
Mel Gormand10e63f2012-10-25 14:16:31 +02001461{
Jan Kara82b0f8c2016-12-14 15:06:58 -08001462 struct vm_area_struct *vma = vmf->vma;
Mel Gormanb8916632013-10-07 11:28:44 +01001463 struct anon_vma *anon_vma = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001464 struct page *page;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001465 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
Mel Gorman8191acb2013-10-07 11:28:45 +01001466 int page_nid = -1, this_nid = numa_node_id();
Peter Zijlstra90572892013-10-07 11:29:20 +01001467 int target_nid, last_cpupid = -1;
Mel Gorman8191acb2013-10-07 11:28:45 +01001468 bool page_locked;
1469 bool migrated = false;
Mel Gormanb191f9b2015-03-25 15:55:40 -07001470 bool was_writable;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001471 int flags = 0;
Mel Gormand10e63f2012-10-25 14:16:31 +02001472
Jan Kara82b0f8c2016-12-14 15:06:58 -08001473 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1474 if (unlikely(!pmd_same(pmd, *vmf->pmd)))
Mel Gormand10e63f2012-10-25 14:16:31 +02001475 goto out_unlock;
1476
Mel Gormande466bd2013-12-18 17:08:42 -08001477 /*
1478 * If there are potential migrations, wait for completion and retry
1479 * without disrupting NUMA hinting information. Do not relock and
1480 * check_same as the page may no longer be mapped.
1481 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001482 if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
1483 page = pmd_page(*vmf->pmd);
Mark Rutland3c226c62017-06-16 14:02:34 -07001484 if (!get_page_unless_zero(page))
1485 goto out_unlock;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001486 spin_unlock(vmf->ptl);
Mel Gorman5d833062015-02-12 14:58:16 -08001487 wait_on_page_locked(page);
Mark Rutland3c226c62017-06-16 14:02:34 -07001488 put_page(page);
Mel Gormande466bd2013-12-18 17:08:42 -08001489 goto out;
1490 }
1491
Mel Gormand10e63f2012-10-25 14:16:31 +02001492 page = pmd_page(pmd);
Mel Gormana1a46182013-10-07 11:28:50 +01001493 BUG_ON(is_huge_zero_page(page));
Mel Gorman8191acb2013-10-07 11:28:45 +01001494 page_nid = page_to_nid(page);
Peter Zijlstra90572892013-10-07 11:29:20 +01001495 last_cpupid = page_cpupid_last(page);
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001496 count_vm_numa_event(NUMA_HINT_FAULTS);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001497 if (page_nid == this_nid) {
Mel Gorman03c5a6e2012-11-02 14:52:48 +00001498 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
Rik van Riel04bb2f92013-10-07 11:29:36 +01001499 flags |= TNF_FAULT_LOCAL;
1500 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001501
Mel Gormanbea66fb2015-03-25 15:55:37 -07001502 /* See similar comment in do_numa_page for explanation */
Aneesh Kumar K.V288bc542017-02-24 14:59:16 -08001503 if (!pmd_savedwrite(pmd))
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001504 flags |= TNF_NO_GROUP;
1505
1506 /*
Mel Gormanff9042b2013-10-07 11:28:43 +01001507 * Acquire the page lock to serialise THP migrations but avoid dropping
1508 * page_table_lock if at all possible
1509 */
Mel Gormanb8916632013-10-07 11:28:44 +01001510 page_locked = trylock_page(page);
1511 target_nid = mpol_misplaced(page, vma, haddr);
1512 if (target_nid == -1) {
1513 /* If the page was locked, there are no parallel migrations */
Mel Gormana54a4072013-10-07 11:28:46 +01001514 if (page_locked)
Mel Gormanb8916632013-10-07 11:28:44 +01001515 goto clear_pmdnuma;
Mel Gorman2b4847e2013-12-18 17:08:32 -08001516 }
Mel Gorman4daae3b2012-11-02 11:33:45 +00001517
Mel Gormande466bd2013-12-18 17:08:42 -08001518 /* Migration could have started since the pmd_trans_migrating check */
Mel Gorman2b4847e2013-12-18 17:08:32 -08001519 if (!page_locked) {
Mark Rutland3c226c62017-06-16 14:02:34 -07001520 page_nid = -1;
1521 if (!get_page_unless_zero(page))
1522 goto out_unlock;
Jan Kara82b0f8c2016-12-14 15:06:58 -08001523 spin_unlock(vmf->ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001524 wait_on_page_locked(page);
Mark Rutland3c226c62017-06-16 14:02:34 -07001525 put_page(page);
Mel Gormanb8916632013-10-07 11:28:44 +01001526 goto out;
1527 }
1528
Mel Gorman2b4847e2013-12-18 17:08:32 -08001529 /*
1530 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1531 * to serialises splits
1532 */
Mel Gormanb8916632013-10-07 11:28:44 +01001533 get_page(page);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001534 spin_unlock(vmf->ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001535 anon_vma = page_lock_anon_vma_read(page);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001536
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001537 /* Confirm the PMD did not change while page_table_lock was released */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001538 spin_lock(vmf->ptl);
1539 if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
Mel Gormanb32967f2012-11-19 12:35:47 +00001540 unlock_page(page);
1541 put_page(page);
Mel Gormana54a4072013-10-07 11:28:46 +01001542 page_nid = -1;
Mel Gormanb32967f2012-11-19 12:35:47 +00001543 goto out_unlock;
1544 }
Mel Gormanff9042b2013-10-07 11:28:43 +01001545
Mel Gormanc3a489c2013-12-18 17:08:38 -08001546 /* Bail if we fail to protect against THP splits for any reason */
1547 if (unlikely(!anon_vma)) {
1548 put_page(page);
1549 page_nid = -1;
1550 goto clear_pmdnuma;
1551 }
1552
Mel Gormana54a4072013-10-07 11:28:46 +01001553 /*
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001554 * Since we took the NUMA fault, we must have observed the !accessible
1555 * bit. Make sure all other CPUs agree with that, to avoid them
1556 * modifying the page we're about to migrate.
1557 *
1558 * Must be done under PTL such that we'll observe the relevant
Peter Zijlstraccde85b2017-08-11 14:29:01 +02001559 * inc_tlb_flush_pending().
1560 *
1561 * We are not sure a pending tlb flush here is for a huge page
1562 * mapping or not. Hence use the tlb range variant
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001563 */
1564 if (mm_tlb_flush_pending(vma->vm_mm))
Peter Zijlstraccde85b2017-08-11 14:29:01 +02001565 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001566
1567 /*
Mel Gormana54a4072013-10-07 11:28:46 +01001568 * Migrate the THP to the requested node, returns with page unlocked
Mel Gorman8a0516e2015-02-12 14:58:22 -08001569 * and access rights restored.
Mel Gormana54a4072013-10-07 11:28:46 +01001570 */
Jan Kara82b0f8c2016-12-14 15:06:58 -08001571 spin_unlock(vmf->ptl);
Peter Zijlstra8b1b4362017-06-07 18:05:07 +02001572
Kirill A. Shutemovbae473a2016-07-26 15:25:20 -07001573 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
Jan Kara82b0f8c2016-12-14 15:06:58 -08001574 vmf->pmd, pmd, vmf->address, page, target_nid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001575 if (migrated) {
1576 flags |= TNF_MIGRATED;
Mel Gorman8191acb2013-10-07 11:28:45 +01001577 page_nid = target_nid;
Mel Gorman074c2382015-03-25 15:55:42 -07001578 } else
1579 flags |= TNF_MIGRATE_FAIL;
Mel Gormanb32967f2012-11-19 12:35:47 +00001580
Mel Gorman8191acb2013-10-07 11:28:45 +01001581 goto out;
Mel Gorman4daae3b2012-11-02 11:33:45 +00001582clear_pmdnuma:
Mel Gormana54a4072013-10-07 11:28:46 +01001583 BUG_ON(!PageLocked(page));
Aneesh Kumar K.V288bc542017-02-24 14:59:16 -08001584 was_writable = pmd_savedwrite(pmd);
Mel Gorman4d942462015-02-12 14:58:28 -08001585 pmd = pmd_modify(pmd, vma->vm_page_prot);
Mel Gormanb7b04002015-03-25 15:55:45 -07001586 pmd = pmd_mkyoung(pmd);
Mel Gormanb191f9b2015-03-25 15:55:40 -07001587 if (was_writable)
1588 pmd = pmd_mkwrite(pmd);
Jan Kara82b0f8c2016-12-14 15:06:58 -08001589 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1590 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
Mel Gormana54a4072013-10-07 11:28:46 +01001591 unlock_page(page);
Mel Gormand10e63f2012-10-25 14:16:31 +02001592out_unlock:
Jan Kara82b0f8c2016-12-14 15:06:58 -08001593 spin_unlock(vmf->ptl);
Mel Gormanb8916632013-10-07 11:28:44 +01001594
1595out:
1596 if (anon_vma)
1597 page_unlock_anon_vma_read(anon_vma);
1598
Mel Gorman8191acb2013-10-07 11:28:45 +01001599 if (page_nid != -1)
Jan Kara82b0f8c2016-12-14 15:06:58 -08001600 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
Aneesh Kumar K.V9a8b3002017-02-24 14:59:56 -08001601 flags);
Mel Gorman8191acb2013-10-07 11:28:45 +01001602
Mel Gormand10e63f2012-10-25 14:16:31 +02001603 return 0;
1604}
1605
Huang Ying319904a2016-07-28 15:48:03 -07001606/*
1607 * Return true if we do MADV_FREE successfully on entire pmd page.
1608 * Otherwise, return false.
1609 */
1610bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001611 pmd_t *pmd, unsigned long addr, unsigned long next)
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001612{
1613 spinlock_t *ptl;
1614 pmd_t orig_pmd;
1615 struct page *page;
1616 struct mm_struct *mm = tlb->mm;
Huang Ying319904a2016-07-28 15:48:03 -07001617 bool ret = false;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001618
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -08001619 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
1620
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001621 ptl = pmd_trans_huge_lock(pmd, vma);
1622 if (!ptl)
Linus Torvalds25eedab2016-01-17 18:33:15 -08001623 goto out_unlocked;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001624
1625 orig_pmd = *pmd;
Huang Ying319904a2016-07-28 15:48:03 -07001626 if (is_huge_zero_pmd(orig_pmd))
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001627 goto out;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001628
Zi Yan84c3fc42017-09-08 16:11:01 -07001629 if (unlikely(!pmd_present(orig_pmd))) {
1630 VM_BUG_ON(thp_migration_supported() &&
1631 !is_pmd_migration_entry(orig_pmd));
1632 goto out;
1633 }
1634
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001635 page = pmd_page(orig_pmd);
1636 /*
1637 * If other processes are mapping this page, we couldn't discard
1638 * the page unless they all do MADV_FREE so let's skip the page.
1639 */
1640 if (page_mapcount(page) != 1)
1641 goto out;
1642
1643 if (!trylock_page(page))
1644 goto out;
1645
1646 /*
1647 * If user want to discard part-pages of THP, split it so MADV_FREE
1648 * will deactivate only them.
1649 */
1650 if (next - addr != HPAGE_PMD_SIZE) {
1651 get_page(page);
1652 spin_unlock(ptl);
Huang Ying9818b8c2016-07-14 12:07:12 -07001653 split_huge_page(page);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001654 unlock_page(page);
Kirill A. Shutemovbbf29ff2017-07-06 15:35:28 -07001655 put_page(page);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001656 goto out_unlocked;
1657 }
1658
1659 if (PageDirty(page))
1660 ClearPageDirty(page);
1661 unlock_page(page);
1662
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001663 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
Kirill A. Shutemov58ceeb62017-04-13 14:56:26 -07001664 pmdp_invalidate(vma, addr, pmd);
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001665 orig_pmd = pmd_mkold(orig_pmd);
1666 orig_pmd = pmd_mkclean(orig_pmd);
1667
1668 set_pmd_at(mm, addr, pmd, orig_pmd);
1669 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1670 }
Shaohua Li802a3a92017-05-03 14:52:32 -07001671
1672 mark_page_lazyfree(page);
Huang Ying319904a2016-07-28 15:48:03 -07001673 ret = true;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08001674out:
1675 spin_unlock(ptl);
1676out_unlocked:
1677 return ret;
1678}
1679
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001680static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1681{
1682 pgtable_t pgtable;
1683
1684 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1685 pte_free(mm, pgtable);
Kirill A. Shutemovc4812902017-11-15 17:35:37 -08001686 mm_dec_nr_ptes(mm);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001687}
1688
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001689int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
Shaohua Lif21760b2012-01-12 17:19:16 -08001690 pmd_t *pmd, unsigned long addr)
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001691{
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001692 pmd_t orig_pmd;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001693 spinlock_t *ptl;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001694
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -08001695 tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
1696
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001697 ptl = __pmd_trans_huge_lock(pmd, vma);
1698 if (!ptl)
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001699 return 0;
1700 /*
1701 * For architectures like ppc64 we look at deposited pgtable
1702 * when calling pmdp_huge_get_and_clear. So do the
1703 * pgtable_trans_huge_withdraw after finishing pmdp related
1704 * operations.
1705 */
1706 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1707 tlb->fullmm);
1708 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1709 if (vma_is_dax(vma)) {
Oliver O'Halloran3b6521f2017-05-08 15:59:43 -07001710 if (arch_needs_pgtable_deposit())
1711 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001712 spin_unlock(ptl);
1713 if (is_huge_zero_pmd(orig_pmd))
Aneesh Kumar K.Vc0f2e172016-12-12 16:42:31 -08001714 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001715 } else if (is_huge_zero_pmd(orig_pmd)) {
Oliver O'Halloranc14a6eb2017-05-08 15:59:40 -07001716 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001717 spin_unlock(ptl);
Aneesh Kumar K.Vc0f2e172016-12-12 16:42:31 -08001718 tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001719 } else {
Zi Yan616b8372017-09-08 16:10:57 -07001720 struct page *page = NULL;
1721 int flush_needed = 1;
1722
1723 if (pmd_present(orig_pmd)) {
1724 page = pmd_page(orig_pmd);
1725 page_remove_rmap(page, true);
1726 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1727 VM_BUG_ON_PAGE(!PageHead(page), page);
1728 } else if (thp_migration_supported()) {
1729 swp_entry_t entry;
1730
1731 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1732 entry = pmd_to_swp_entry(orig_pmd);
1733 page = pfn_to_page(swp_offset(entry));
1734 flush_needed = 0;
1735 } else
1736 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1737
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001738 if (PageAnon(page)) {
Oliver O'Halloranc14a6eb2017-05-08 15:59:40 -07001739 zap_deposited_table(tlb->mm, pmd);
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001740 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1741 } else {
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08001742 if (arch_needs_pgtable_deposit())
1743 zap_deposited_table(tlb->mm, pmd);
Yang Shifadae292018-08-17 15:44:55 -07001744 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
Kirill A. Shutemovb5072382016-07-26 15:25:34 -07001745 }
Zi Yan616b8372017-09-08 16:10:57 -07001746
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001747 spin_unlock(ptl);
Zi Yan616b8372017-09-08 16:10:57 -07001748 if (flush_needed)
1749 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001750 }
Kirill A. Shutemovda146762015-09-08 14:59:31 -07001751 return 1;
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -08001752}
1753
Aneesh Kumar K.V1dd38b62016-12-12 16:44:29 -08001754#ifndef pmd_move_must_withdraw
1755static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1756 spinlock_t *old_pmd_ptl,
1757 struct vm_area_struct *vma)
1758{
1759 /*
1760 * With split pmd lock we also need to move preallocated
1761 * PTE page table if new_pmd is on different PMD page table.
1762 *
1763 * We also don't deposit and withdraw tables for file pages.
1764 */
1765 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1766}
1767#endif
1768
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001769static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1770{
1771#ifdef CONFIG_MEM_SOFT_DIRTY
1772 if (unlikely(is_pmd_migration_entry(pmd)))
1773 pmd = pmd_swp_mksoft_dirty(pmd);
1774 else if (pmd_present(pmd))
1775 pmd = pmd_mksoft_dirty(pmd);
1776#endif
1777 return pmd;
1778}
1779
Hugh Dickinsbf8616d2016-05-19 17:12:54 -07001780bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001781 unsigned long new_addr, unsigned long old_end,
Aaron Lu5d190422016-11-10 17:16:33 +08001782 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001783{
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001784 spinlock_t *old_ptl, *new_ptl;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001785 pmd_t pmd;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001786 struct mm_struct *mm = vma->vm_mm;
Aaron Lu5d190422016-11-10 17:16:33 +08001787 bool force_flush = false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001788
1789 if ((old_addr & ~HPAGE_PMD_MASK) ||
1790 (new_addr & ~HPAGE_PMD_MASK) ||
Hugh Dickinsbf8616d2016-05-19 17:12:54 -07001791 old_end - old_addr < HPAGE_PMD_SIZE)
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001792 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001793
1794 /*
1795 * The destination pmd shouldn't be established, free_pgtables()
1796 * should have release it.
1797 */
1798 if (WARN_ON(!pmd_none(*new_pmd))) {
1799 VM_BUG_ON(pmd_trans_huge(*new_pmd));
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001800 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001801 }
1802
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001803 /*
1804 * We don't have to worry about the ordering of src and dst
1805 * ptlocks because exclusive mmap_sem prevents deadlock.
1806 */
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001807 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1808 if (old_ptl) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001809 new_ptl = pmd_lockptr(mm, new_pmd);
1810 if (new_ptl != old_ptl)
1811 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
Aneesh Kumar K.V8809aa22015-06-24 16:57:44 -07001812 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
Aaron Lua2ce2662016-11-29 13:27:31 +08001813 if (pmd_present(pmd) && pmd_dirty(pmd))
1814 force_flush = true;
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001815 VM_BUG_ON(!pmd_none(*new_pmd));
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001816
Aneesh Kumar K.V1dd38b62016-12-12 16:44:29 -08001817 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301818 pgtable_t pgtable;
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001819 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1820 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
Kirill A. Shutemov35928062013-12-12 17:12:33 -08001821 }
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001822 pmd = move_soft_dirty_pmd(pmd);
1823 set_pmd_at(mm, new_addr, new_pmd, pmd);
Aneesh Kumar K.Vb3084f42014-01-13 11:34:24 +05301824 if (new_ptl != old_ptl)
1825 spin_unlock(new_ptl);
Aaron Lu5d190422016-11-10 17:16:33 +08001826 if (force_flush)
1827 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1828 else
1829 *need_flush = true;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001830 spin_unlock(old_ptl);
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001831 return true;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001832 }
Kirill A. Shutemov4b471e82016-01-15 16:53:39 -08001833 return false;
Andrea Arcangeli37a1c492011-10-31 17:08:30 -07001834}
1835
Mel Gormanf123d742013-10-07 11:28:49 +01001836/*
1837 * Returns
1838 * - 0 if PMD could not be locked
1839 * - 1 if PMD was locked but protections unchange and TLB flush unnecessary
1840 * - HPAGE_PMD_NR is protections changed and TLB flush necessary
1841 */
Johannes Weinercd7548a2011-01-13 15:47:04 -08001842int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Mel Gormane944fd62015-02-12 14:58:35 -08001843 unsigned long addr, pgprot_t newprot, int prot_numa)
Johannes Weinercd7548a2011-01-13 15:47:04 -08001844{
1845 struct mm_struct *mm = vma->vm_mm;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001846 spinlock_t *ptl;
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001847 pmd_t entry;
1848 bool preserve_write;
1849 int ret;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001850
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001851 ptl = __pmd_trans_huge_lock(pmd, vma);
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001852 if (!ptl)
1853 return 0;
Mel Gormane944fd62015-02-12 14:58:35 -08001854
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001855 preserve_write = prot_numa && pmd_write(*pmd);
1856 ret = 1;
Mel Gormane944fd62015-02-12 14:58:35 -08001857
Zi Yan84c3fc42017-09-08 16:11:01 -07001858#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1859 if (is_swap_pmd(*pmd)) {
1860 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1861
1862 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1863 if (is_write_migration_entry(entry)) {
1864 pmd_t newpmd;
1865 /*
1866 * A protection check is difficult so
1867 * just be safe and disable write
1868 */
1869 make_migration_entry_read(&entry);
1870 newpmd = swp_entry_to_pmd(entry);
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07001871 if (pmd_swp_soft_dirty(*pmd))
1872 newpmd = pmd_swp_mksoft_dirty(newpmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001873 set_pmd_at(mm, addr, pmd, newpmd);
1874 }
1875 goto unlock;
1876 }
1877#endif
1878
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001879 /*
1880 * Avoid trapping faults against the zero page. The read-only
1881 * data is likely to be read-cached on the local CPU and
1882 * local/remote hits to the zero page are not interesting.
1883 */
1884 if (prot_numa && is_huge_zero_pmd(*pmd))
1885 goto unlock;
Johannes Weinercd7548a2011-01-13 15:47:04 -08001886
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001887 if (prot_numa && pmd_protnone(*pmd))
1888 goto unlock;
1889
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001890 /*
1891 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1892 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1893 * which is also under down_read(mmap_sem):
1894 *
1895 * CPU0: CPU1:
1896 * change_huge_pmd(prot_numa=1)
1897 * pmdp_huge_get_and_clear_notify()
1898 * madvise_dontneed()
1899 * zap_pmd_range()
1900 * pmd_trans_huge(*pmd) == 0 (without ptl)
1901 * // skip the pmd
1902 * set_pmd_at();
1903 * // pmd is re-established
1904 *
1905 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1906 * which may break userspace.
1907 *
1908 * pmdp_invalidate() is required to make sure we don't miss
1909 * dirty/young flags set by hardware.
1910 */
Kirill A. Shutemova3cf9882018-01-31 16:18:20 -08001911 entry = pmdp_invalidate(vma, addr, pmd);
Kirill A. Shutemovced10802017-04-13 14:56:20 -07001912
Kirill A. Shutemov0a85e51d2017-04-13 14:56:17 -07001913 entry = pmd_modify(entry, newprot);
1914 if (preserve_write)
1915 entry = pmd_mk_savedwrite(entry);
1916 ret = HPAGE_PMD_NR;
1917 set_pmd_at(mm, addr, pmd, entry);
1918 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
1919unlock:
1920 spin_unlock(ptl);
Johannes Weinercd7548a2011-01-13 15:47:04 -08001921 return ret;
1922}
1923
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001924/*
Huang Ying8f19b0c2016-07-26 15:27:04 -07001925 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001926 *
Huang Ying8f19b0c2016-07-26 15:27:04 -07001927 * Note that if it returns page table lock pointer, this routine returns without
1928 * unlocking page table lock. So callers must unlock it.
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001929 */
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001930spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001931{
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001932 spinlock_t *ptl;
1933 ptl = pmd_lock(vma->vm_mm, pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07001934 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1935 pmd_devmap(*pmd)))
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08001936 return ptl;
1937 spin_unlock(ptl);
1938 return NULL;
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001939}
1940
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001941/*
1942 * Returns true if a given pud maps a thp, false otherwise.
1943 *
1944 * Note that if it returns true, this routine returns without unlocking page
1945 * table lock. So callers must unlock it.
1946 */
1947spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1948{
1949 spinlock_t *ptl;
1950
1951 ptl = pud_lock(vma->vm_mm, pud);
1952 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1953 return ptl;
1954 spin_unlock(ptl);
1955 return NULL;
1956}
1957
1958#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1959int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1960 pud_t *pud, unsigned long addr)
1961{
1962 pud_t orig_pud;
1963 spinlock_t *ptl;
1964
1965 ptl = __pud_trans_huge_lock(pud, vma);
1966 if (!ptl)
1967 return 0;
1968 /*
1969 * For architectures like ppc64 we look at deposited pgtable
1970 * when calling pudp_huge_get_and_clear. So do the
1971 * pgtable_trans_huge_withdraw after finishing pudp related
1972 * operations.
1973 */
1974 orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
1975 tlb->fullmm);
1976 tlb_remove_pud_tlb_entry(tlb, pud, addr);
1977 if (vma_is_dax(vma)) {
1978 spin_unlock(ptl);
1979 /* No zero page support yet */
1980 } else {
1981 /* No support for anonymous PUD pages yet */
1982 BUG();
1983 }
1984 return 1;
1985}
1986
1987static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1988 unsigned long haddr)
1989{
1990 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1991 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1992 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
1993 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
1994
Yisheng Xiece9311c2017-03-09 16:17:00 -08001995 count_vm_event(THP_SPLIT_PUD);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08001996
1997 pudp_huge_clear_flush_notify(vma, haddr, pud);
1998}
1999
2000void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2001 unsigned long address)
2002{
2003 spinlock_t *ptl;
2004 struct mm_struct *mm = vma->vm_mm;
2005 unsigned long haddr = address & HPAGE_PUD_MASK;
2006
2007 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
2008 ptl = pud_lock(mm, pud);
2009 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2010 goto out;
2011 __split_huge_pud_locked(vma, pud, haddr);
2012
2013out:
2014 spin_unlock(ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002015 /*
2016 * No need to double call mmu_notifier->invalidate_range() callback as
2017 * the above pudp_huge_clear_flush_notify() did already call it.
2018 */
2019 mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
2020 HPAGE_PUD_SIZE);
Matthew Wilcoxa00cc7d2017-02-24 14:57:02 -08002021}
2022#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2023
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002024static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2025 unsigned long haddr, pmd_t *pmd)
2026{
2027 struct mm_struct *mm = vma->vm_mm;
2028 pgtable_t pgtable;
2029 pmd_t _pmd;
2030 int i;
2031
Jérôme Glisse0f108512017-11-15 17:34:07 -08002032 /*
2033 * Leave pmd empty until pte is filled note that it is fine to delay
2034 * notification until mmu_notifier_invalidate_range_end() as we are
2035 * replacing a zero pmd write protected page with a zero pte write
2036 * protected page.
2037 *
Mike Rapoportad56b732018-03-21 21:22:47 +02002038 * See Documentation/vm/mmu_notifier.rst
Jérôme Glisse0f108512017-11-15 17:34:07 -08002039 */
2040 pmdp_huge_clear_flush(vma, haddr, pmd);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002041
2042 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2043 pmd_populate(mm, &_pmd, pgtable);
2044
2045 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2046 pte_t *pte, entry;
2047 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2048 entry = pte_mkspecial(entry);
2049 pte = pte_offset_map(&_pmd, haddr);
2050 VM_BUG_ON(!pte_none(*pte));
2051 set_pte_at(mm, haddr, pte, entry);
2052 pte_unmap(pte);
2053 }
2054 smp_wmb(); /* make pte visible before pmd */
2055 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002056}
2057
2058static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002059 unsigned long haddr, bool freeze)
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002060{
2061 struct mm_struct *mm = vma->vm_mm;
2062 struct page *page;
2063 pgtable_t pgtable;
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002064 pmd_t old_pmd, _pmd;
Kirill A. Shutemova3cf9882018-01-31 16:18:20 -08002065 bool young, write, soft_dirty, pmd_migration = false;
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002066 unsigned long addr;
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002067 int i;
2068
2069 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2070 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2071 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
Zi Yan84c3fc42017-09-08 16:11:01 -07002072 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2073 && !pmd_devmap(*pmd));
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002074
2075 count_vm_event(THP_SPLIT_PMD);
2076
Kirill A. Shutemovd21b9e52016-07-26 15:25:37 -07002077 if (!vma_is_anonymous(vma)) {
2078 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
Aneesh Kumar K.V953c66c2016-12-12 16:44:32 -08002079 /*
2080 * We are going to unmap this huge page. So
2081 * just go ahead and zap it
2082 */
2083 if (arch_needs_pgtable_deposit())
2084 zap_deposited_table(mm, pmd);
Kirill A. Shutemovd21b9e52016-07-26 15:25:37 -07002085 if (vma_is_dax(vma))
2086 return;
2087 page = pmd_page(_pmd);
Hugh Dickinse1f1b152018-07-20 17:53:45 -07002088 if (!PageDirty(page) && pmd_dirty(_pmd))
2089 set_page_dirty(page);
Kirill A. Shutemovd21b9e52016-07-26 15:25:37 -07002090 if (!PageReferenced(page) && pmd_young(_pmd))
2091 SetPageReferenced(page);
2092 page_remove_rmap(page, true);
2093 put_page(page);
Yang Shifadae292018-08-17 15:44:55 -07002094 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002095 return;
2096 } else if (is_huge_zero_pmd(*pmd)) {
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002097 /*
2098 * FIXME: Do we want to invalidate secondary mmu by calling
2099 * mmu_notifier_invalidate_range() see comments below inside
2100 * __split_huge_pmd() ?
2101 *
2102 * We are going from a zero huge page write protected to zero
2103 * small page also write protected so it does not seems useful
2104 * to invalidate secondary mmu at this time.
2105 */
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002106 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2107 }
2108
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002109 /*
2110 * Up to this point the pmd is present and huge and userland has the
2111 * whole access to the hugepage during the split (which happens in
2112 * place). If we overwrite the pmd with the not-huge version pointing
2113 * to the pte here (which of course we could if all CPUs were bug
2114 * free), userland could trigger a small page size TLB miss on the
2115 * small sized TLB while the hugepage TLB entry is still established in
2116 * the huge TLB. Some CPU doesn't like that.
2117 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2118 * 383 on page 93. Intel should be safe but is also warns that it's
2119 * only safe if the permission and cache attributes of the two entries
2120 * loaded in the two TLB is identical (which should be the case here).
2121 * But it is generally safer to never allow small and huge TLB entries
2122 * for the same virtual address to be loaded simultaneously. So instead
2123 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2124 * current pmd notpresent (atomically because here the pmd_trans_huge
2125 * must remain set at all times on the pmd until the split is complete
2126 * for this pmd), then we flush the SMP TLB and finally we write the
2127 * non-huge version of the pmd entry with pmd_populate.
2128 */
2129 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2130
Zi Yan84c3fc42017-09-08 16:11:01 -07002131#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002132 pmd_migration = is_pmd_migration_entry(old_pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07002133 if (pmd_migration) {
2134 swp_entry_t entry;
2135
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002136 entry = pmd_to_swp_entry(old_pmd);
Zi Yan84c3fc42017-09-08 16:11:01 -07002137 page = pfn_to_page(swp_offset(entry));
2138 } else
2139#endif
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002140 page = pmd_page(old_pmd);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002141 VM_BUG_ON_PAGE(!page_count(page), page);
Joonsoo Kimfe896d12016-03-17 14:19:26 -07002142 page_ref_add(page, HPAGE_PMD_NR - 1);
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002143 if (pmd_dirty(old_pmd))
2144 SetPageDirty(page);
2145 write = pmd_write(old_pmd);
2146 young = pmd_young(old_pmd);
2147 soft_dirty = pmd_soft_dirty(old_pmd);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002148
Aneesh Kumar K.V423ac9a2018-01-31 16:18:24 -08002149 /*
2150 * Withdraw the table only after we mark the pmd entry invalid.
2151 * This's critical for some architectures (Power).
2152 */
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002153 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2154 pmd_populate(mm, &_pmd, pgtable);
2155
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002156 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002157 pte_t entry, *pte;
2158 /*
2159 * Note that NUMA hinting access restrictions are not
2160 * transferred to avoid any possibility of altering
2161 * permissions across VMAs.
2162 */
Zi Yan84c3fc42017-09-08 16:11:01 -07002163 if (freeze || pmd_migration) {
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002164 swp_entry_t swp_entry;
2165 swp_entry = make_migration_entry(page + i, write);
2166 entry = swp_entry_to_pte(swp_entry);
Andrea Arcangeli804dd152016-08-25 15:16:57 -07002167 if (soft_dirty)
2168 entry = pte_swp_mksoft_dirty(entry);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002169 } else {
Andrea Arcangeli6d2329f2016-10-07 17:01:22 -07002170 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08002171 entry = maybe_mkwrite(entry, vma);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002172 if (!write)
2173 entry = pte_wrprotect(entry);
2174 if (!young)
2175 entry = pte_mkold(entry);
Andrea Arcangeli804dd152016-08-25 15:16:57 -07002176 if (soft_dirty)
2177 entry = pte_mksoft_dirty(entry);
Kirill A. Shutemovba988282016-01-15 16:53:56 -08002178 }
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002179 pte = pte_offset_map(&_pmd, addr);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002180 BUG_ON(!pte_none(*pte));
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002181 set_pte_at(mm, addr, pte, entry);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002182 atomic_inc(&page[i]._mapcount);
2183 pte_unmap(pte);
2184 }
2185
2186 /*
2187 * Set PG_double_map before dropping compound_mapcount to avoid
2188 * false-negative page_mapped().
2189 */
2190 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2191 for (i = 0; i < HPAGE_PMD_NR; i++)
2192 atomic_inc(&page[i]._mapcount);
2193 }
2194
2195 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2196 /* Last compound_mapcount is gone. */
Mel Gorman11fb9982016-07-28 15:46:20 -07002197 __dec_node_page_state(page, NR_ANON_THPS);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002198 if (TestClearPageDoubleMap(page)) {
2199 /* No need in mapcount reference anymore */
2200 for (i = 0; i < HPAGE_PMD_NR; i++)
2201 atomic_dec(&page[i]._mapcount);
2202 }
2203 }
2204
2205 smp_wmb(); /* make pte visible before pmd */
2206 pmd_populate(mm, pmd, pgtable);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002207
2208 if (freeze) {
Kirill A. Shutemov2ac015e2016-02-24 18:58:03 +03002209 for (i = 0; i < HPAGE_PMD_NR; i++) {
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002210 page_remove_rmap(page + i, false);
2211 put_page(page + i);
2212 }
2213 }
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002214}
2215
2216void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002217 unsigned long address, bool freeze, struct page *page)
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002218{
2219 spinlock_t *ptl;
2220 struct mm_struct *mm = vma->vm_mm;
2221 unsigned long haddr = address & HPAGE_PMD_MASK;
2222
2223 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2224 ptl = pmd_lock(mm, pmd);
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002225
2226 /*
2227 * If caller asks to setup a migration entries, we need a page to check
2228 * pmd against. Otherwise we can end up replacing wrong page.
2229 */
2230 VM_BUG_ON(freeze && !page);
2231 if (page && page != pmd_page(*pmd))
2232 goto out;
2233
Dan Williams5c7fb562016-01-15 16:56:52 -08002234 if (pmd_trans_huge(*pmd)) {
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002235 page = pmd_page(*pmd);
Dan Williams5c7fb562016-01-15 16:56:52 -08002236 if (PageMlocked(page))
Kirill A. Shutemov5f737712016-03-17 14:20:13 -07002237 clear_page_mlock(page);
Zi Yan84c3fc42017-09-08 16:11:01 -07002238 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002239 goto out;
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002240 __split_huge_pmd_locked(vma, pmd, haddr, freeze);
Kirill A. Shutemove90309c2016-01-15 16:54:33 -08002241out:
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002242 spin_unlock(ptl);
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002243 /*
2244 * No need to double call mmu_notifier->invalidate_range() callback.
2245 * They are 3 cases to consider inside __split_huge_pmd_locked():
2246 * 1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
2247 * 2) __split_huge_zero_page_pmd() read only zero page and any write
2248 * fault will trigger a flush_notify before pointing to a new page
2249 * (it is fine if the secondary mmu keeps pointing to the old zero
2250 * page in the meantime)
2251 * 3) Split a huge pmd into pte pointing to the same page. No need
2252 * to invalidate secondary tlb entry they are all still valid.
2253 * any further changes to individual pte will notify. So no need
2254 * to call mmu_notifier->invalidate_range()
2255 */
2256 mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
2257 HPAGE_PMD_SIZE);
Kirill A. Shutemoveef1b3b2016-01-15 16:53:53 -08002258}
2259
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002260void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2261 bool freeze, struct page *page)
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002262{
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002263 pgd_t *pgd;
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002264 p4d_t *p4d;
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002265 pud_t *pud;
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002266 pmd_t *pmd;
2267
Kirill A. Shutemov78ddc532016-01-15 16:52:42 -08002268 pgd = pgd_offset(vma->vm_mm, address);
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002269 if (!pgd_present(*pgd))
2270 return;
2271
Kirill A. Shutemovc2febaf2017-03-09 17:24:07 +03002272 p4d = p4d_offset(pgd, address);
2273 if (!p4d_present(*p4d))
2274 return;
2275
2276 pud = pud_offset(p4d, address);
Hugh Dickinsf72e7dc2014-06-23 13:22:05 -07002277 if (!pud_present(*pud))
2278 return;
2279
2280 pmd = pmd_offset(pud, address);
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002281
Naoya Horiguchi33f47512016-07-14 12:07:32 -07002282 __split_huge_pmd(vma, pmd, address, freeze, page);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002283}
2284
Kirill A. Shutemove1b99962015-09-08 14:58:37 -07002285void vma_adjust_trans_huge(struct vm_area_struct *vma,
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002286 unsigned long start,
2287 unsigned long end,
2288 long adjust_next)
2289{
2290 /*
2291 * If the new start address isn't hpage aligned and it could
2292 * previously contain an hugepage: check if we need to split
2293 * an huge pmd.
2294 */
2295 if (start & ~HPAGE_PMD_MASK &&
2296 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2297 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002298 split_huge_pmd_address(vma, start, false, NULL);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002299
2300 /*
2301 * If the new end address isn't hpage aligned and it could
2302 * previously contain an hugepage: check if we need to split
2303 * an huge pmd.
2304 */
2305 if (end & ~HPAGE_PMD_MASK &&
2306 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2307 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002308 split_huge_pmd_address(vma, end, false, NULL);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002309
2310 /*
2311 * If we're also updating the vma->vm_next->vm_start, if the new
2312 * vm_next->vm_start isn't page aligned and it could previously
2313 * contain an hugepage: check if we need to split an huge pmd.
2314 */
2315 if (adjust_next > 0) {
2316 struct vm_area_struct *next = vma->vm_next;
2317 unsigned long nstart = next->vm_start;
2318 nstart += adjust_next << PAGE_SHIFT;
2319 if (nstart & ~HPAGE_PMD_MASK &&
2320 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2321 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002322 split_huge_pmd_address(next, nstart, false, NULL);
Andrea Arcangeli94fcc582011-01-13 15:47:08 -08002323 }
2324}
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002325
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002326static void freeze_page(struct page *page)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002327{
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002328 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
Kirill A. Shutemovc7ab0d22017-02-24 14:58:01 -08002329 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
Minchan Kim666e5a42017-05-03 14:54:20 -07002330 bool unmap_success;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002331
2332 VM_BUG_ON_PAGE(!PageHead(page), page);
2333
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002334 if (PageAnon(page))
Naoya Horiguchib5ff8162017-09-08 16:10:49 -07002335 ttu_flags |= TTU_SPLIT_FREEZE;
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002336
Minchan Kim666e5a42017-05-03 14:54:20 -07002337 unmap_success = try_to_unmap(page, ttu_flags);
2338 VM_BUG_ON_PAGE(!unmap_success, page);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002339}
2340
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002341static void unfreeze_page(struct page *page)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002342{
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002343 int i;
Kirill A. Shutemovace71a12017-02-24 14:57:45 -08002344 if (PageTransHuge(page)) {
2345 remove_migration_ptes(page, page, true);
2346 } else {
2347 for (i = 0; i < HPAGE_PMD_NR; i++)
2348 remove_migration_ptes(page + i, page + i, true);
2349 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002350}
2351
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002352static void __split_huge_page_tail(struct page *head, int tail,
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002353 struct lruvec *lruvec, struct list_head *list)
2354{
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002355 struct page *page_tail = head + tail;
2356
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002357 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002358
2359 /*
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002360 * Clone page flags before unfreezing refcount.
2361 *
2362 * After successful get_page_unless_zero() might follow flags change,
2363 * for exmaple lock_page() which set PG_waiters.
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002364 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002365 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2366 page_tail->flags |= (head->flags &
2367 ((1L << PG_referenced) |
2368 (1L << PG_swapbacked) |
Huang Ying38d8b4e2017-07-06 15:37:18 -07002369 (1L << PG_swapcache) |
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002370 (1L << PG_mlocked) |
2371 (1L << PG_uptodate) |
2372 (1L << PG_active) |
2373 (1L << PG_locked) |
Minchan Kimb8d3c4c2016-01-15 16:55:42 -08002374 (1L << PG_unevictable) |
2375 (1L << PG_dirty)));
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002376
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002377 /* Page flags must be visible before we make the page non-compound. */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002378 smp_wmb();
2379
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002380 /*
2381 * Clear PageTail before unfreezing page refcount.
2382 *
2383 * After successful get_page_unless_zero() might follow put_page()
2384 * which needs correct compound_head().
2385 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002386 clear_compound_head(page_tail);
2387
Konstantin Khlebnikov605ca5e2018-04-05 16:23:28 -07002388 /* Finally unfreeze refcount. Additional reference from page cache. */
2389 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2390 PageSwapCache(head)));
2391
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002392 if (page_is_young(head))
2393 set_page_young(page_tail);
2394 if (page_is_idle(head))
2395 set_page_idle(page_tail);
2396
2397 /* ->mapping in first tail page is compound_mapcount */
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002398 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002399 page_tail);
2400 page_tail->mapping = head->mapping;
2401
2402 page_tail->index = head->index + tail;
2403 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
Michal Hocko94723aa2018-04-10 16:30:07 -07002404
2405 /*
2406 * always add to the tail because some iterators expect new
2407 * pages to show after the currently processed elements - e.g.
2408 * migrate_pages
2409 */
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002410 lru_add_page_tail(head, page_tail, lruvec, list);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002411}
2412
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002413static void __split_huge_page(struct page *page, struct list_head *list,
2414 unsigned long flags)
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002415{
2416 struct page *head = compound_head(page);
2417 struct zone *zone = page_zone(head);
2418 struct lruvec *lruvec;
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002419 pgoff_t end = -1;
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002420 int i;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002421
Mel Gorman599d0c92016-07-28 15:45:31 -07002422 lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002423
2424 /* complete memcg works before add pages to LRU */
2425 mem_cgroup_split_huge_fixup(head);
2426
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002427 if (!PageAnon(page))
2428 end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
2429
2430 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
Kirill A. Shutemov8df651c2016-03-15 14:57:30 -07002431 __split_huge_page_tail(head, i, lruvec, list);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002432 /* Some pages can be beyond i_size: drop them from page cache */
2433 if (head[i].index >= end) {
Hugh Dickins2d077d42018-06-01 16:50:45 -07002434 ClearPageDirty(head + i);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002435 __delete_from_page_cache(head + i, NULL);
Kirill A. Shutemov800d8c62016-07-26 15:26:18 -07002436 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2437 shmem_uncharge(head->mapping->host, 1);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002438 put_page(head + i);
2439 }
2440 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002441
2442 ClearPageCompound(head);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002443 /* See comment in __split_huge_page_tail() */
2444 if (PageAnon(head)) {
Huang Ying38d8b4e2017-07-06 15:37:18 -07002445 /* Additional pin to radix tree of swap cache */
2446 if (PageSwapCache(head))
2447 page_ref_add(head, 2);
2448 else
2449 page_ref_inc(head);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002450 } else {
2451 /* Additional pin to radix tree */
2452 page_ref_add(head, 2);
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002453 xa_unlock(&head->mapping->i_pages);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002454 }
2455
Mel Gormana52633d2016-07-28 15:45:28 -07002456 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002457
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002458 unfreeze_page(head);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002459
2460 for (i = 0; i < HPAGE_PMD_NR; i++) {
2461 struct page *subpage = head + i;
2462 if (subpage == page)
2463 continue;
2464 unlock_page(subpage);
2465
2466 /*
2467 * Subpages may be freed if there wasn't any mapping
2468 * like if add_to_swap() is running on a lru page that
2469 * had its mapping zapped. And freeing these pages
2470 * requires taking the lru_lock so we do the put_page
2471 * of the tail pages after the split is complete.
2472 */
2473 put_page(subpage);
2474 }
2475}
2476
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002477int total_mapcount(struct page *page)
2478{
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002479 int i, compound, ret;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002480
2481 VM_BUG_ON_PAGE(PageTail(page), page);
2482
2483 if (likely(!PageCompound(page)))
2484 return atomic_read(&page->_mapcount) + 1;
2485
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002486 compound = compound_mapcount(page);
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002487 if (PageHuge(page))
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002488 return compound;
2489 ret = compound;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002490 for (i = 0; i < HPAGE_PMD_NR; i++)
2491 ret += atomic_read(&page[i]._mapcount) + 1;
Kirill A. Shutemovdd78fed2016-07-26 15:25:26 -07002492 /* File pages has compound_mapcount included in _mapcount */
2493 if (!PageAnon(page))
2494 return ret - compound * HPAGE_PMD_NR;
Kirill A. Shutemovb20ce5e2016-01-15 16:54:37 -08002495 if (PageDoubleMap(page))
2496 ret -= HPAGE_PMD_NR;
2497 return ret;
2498}
2499
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002500/*
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002501 * This calculates accurately how many mappings a transparent hugepage
2502 * has (unlike page_mapcount() which isn't fully accurate). This full
2503 * accuracy is primarily needed to know if copy-on-write faults can
2504 * reuse the page and change the mapping to read-write instead of
2505 * copying them. At the same time this returns the total_mapcount too.
2506 *
2507 * The function returns the highest mapcount any one of the subpages
2508 * has. If the return value is one, even if different processes are
2509 * mapping different subpages of the transparent hugepage, they can
2510 * all reuse it, because each process is reusing a different subpage.
2511 *
2512 * The total_mapcount is instead counting all virtual mappings of the
2513 * subpages. If the total_mapcount is equal to "one", it tells the
2514 * caller all mappings belong to the same "mm" and in turn the
2515 * anon_vma of the transparent hugepage can become the vma->anon_vma
2516 * local one as no other process may be mapping any of the subpages.
2517 *
2518 * It would be more accurate to replace page_mapcount() with
2519 * page_trans_huge_mapcount(), however we only use
2520 * page_trans_huge_mapcount() in the copy-on-write faults where we
2521 * need full accuracy to avoid breaking page pinning, because
2522 * page_trans_huge_mapcount() is slower than page_mapcount().
2523 */
2524int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
2525{
2526 int i, ret, _total_mapcount, mapcount;
2527
2528 /* hugetlbfs shouldn't call it */
2529 VM_BUG_ON_PAGE(PageHuge(page), page);
2530
2531 if (likely(!PageTransCompound(page))) {
2532 mapcount = atomic_read(&page->_mapcount) + 1;
2533 if (total_mapcount)
2534 *total_mapcount = mapcount;
2535 return mapcount;
2536 }
2537
2538 page = compound_head(page);
2539
2540 _total_mapcount = ret = 0;
2541 for (i = 0; i < HPAGE_PMD_NR; i++) {
2542 mapcount = atomic_read(&page[i]._mapcount) + 1;
2543 ret = max(ret, mapcount);
2544 _total_mapcount += mapcount;
2545 }
2546 if (PageDoubleMap(page)) {
2547 ret -= 1;
2548 _total_mapcount -= HPAGE_PMD_NR;
2549 }
2550 mapcount = compound_mapcount(page);
2551 ret += mapcount;
2552 _total_mapcount += mapcount;
2553 if (total_mapcount)
2554 *total_mapcount = _total_mapcount;
2555 return ret;
2556}
2557
Huang Yingb8f593c2017-07-06 15:37:28 -07002558/* Racy check whether the huge page can be split */
2559bool can_split_huge_page(struct page *page, int *pextra_pins)
2560{
2561 int extra_pins;
2562
2563 /* Additional pins from radix tree */
2564 if (PageAnon(page))
2565 extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
2566 else
2567 extra_pins = HPAGE_PMD_NR;
2568 if (pextra_pins)
2569 *pextra_pins = extra_pins;
2570 return total_mapcount(page) == page_count(page) - extra_pins - 1;
2571}
2572
Andrea Arcangeli6d0a07e2016-05-12 15:42:25 -07002573/*
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002574 * This function splits huge page into normal pages. @page can point to any
2575 * subpage of huge page to split. Split doesn't change the position of @page.
2576 *
2577 * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
2578 * The huge page must be locked.
2579 *
2580 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2581 *
2582 * Both head page and tail pages will inherit mapping, flags, and so on from
2583 * the hugepage.
2584 *
2585 * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
2586 * they are not mapped.
2587 *
2588 * Returns 0 if the hugepage is split successfully.
2589 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2590 * us.
2591 */
2592int split_huge_page_to_list(struct page *page, struct list_head *list)
2593{
2594 struct page *head = compound_head(page);
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002595 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002596 struct anon_vma *anon_vma = NULL;
2597 struct address_space *mapping = NULL;
2598 int count, mapcount, extra_pins, ret;
Kirill A. Shutemovd9654322016-01-15 16:54:43 -08002599 bool mlocked;
Kirill A. Shutemov0b9b6ff2016-01-20 14:58:09 -08002600 unsigned long flags;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002601
2602 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002603 VM_BUG_ON_PAGE(!PageLocked(page), page);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002604 VM_BUG_ON_PAGE(!PageCompound(page), page);
2605
Huang Ying59807682017-09-06 16:22:34 -07002606 if (PageWriteback(page))
2607 return -EBUSY;
2608
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002609 if (PageAnon(head)) {
2610 /*
2611 * The caller does not necessarily hold an mmap_sem that would
2612 * prevent the anon_vma disappearing so we first we take a
2613 * reference to it and then lock the anon_vma for write. This
2614 * is similar to page_lock_anon_vma_read except the write lock
2615 * is taken to serialise against parallel split or collapse
2616 * operations.
2617 */
2618 anon_vma = page_get_anon_vma(head);
2619 if (!anon_vma) {
2620 ret = -EBUSY;
2621 goto out;
2622 }
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002623 mapping = NULL;
2624 anon_vma_lock_write(anon_vma);
2625 } else {
2626 mapping = head->mapping;
2627
2628 /* Truncated ? */
2629 if (!mapping) {
2630 ret = -EBUSY;
2631 goto out;
2632 }
2633
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002634 anon_vma = NULL;
2635 i_mmap_lock_read(mapping);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002636 }
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002637
2638 /*
2639 * Racy check if we can split the page, before freeze_page() will
2640 * split PMDs
2641 */
Huang Yingb8f593c2017-07-06 15:37:28 -07002642 if (!can_split_huge_page(head, &extra_pins)) {
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002643 ret = -EBUSY;
2644 goto out_unlock;
2645 }
2646
Kirill A. Shutemovd9654322016-01-15 16:54:43 -08002647 mlocked = PageMlocked(page);
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002648 freeze_page(head);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002649 VM_BUG_ON_PAGE(compound_mapcount(head), head);
2650
Kirill A. Shutemovd9654322016-01-15 16:54:43 -08002651 /* Make sure the page is not on per-CPU pagevec as it takes pin */
2652 if (mlocked)
2653 lru_add_drain();
2654
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002655 /* prevent PageLRU to go away from under us, and freeze lru stats */
Mel Gormana52633d2016-07-28 15:45:28 -07002656 spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002657
2658 if (mapping) {
2659 void **pslot;
2660
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002661 xa_lock(&mapping->i_pages);
2662 pslot = radix_tree_lookup_slot(&mapping->i_pages,
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002663 page_index(head));
2664 /*
2665 * Check if the head page is present in radix tree.
2666 * We assume all tail are present too, if head is there.
2667 */
2668 if (radix_tree_deref_slot_protected(pslot,
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002669 &mapping->i_pages.xa_lock) != head)
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002670 goto fail;
2671 }
2672
Joonsoo Kim0139aa72016-05-19 17:10:49 -07002673 /* Prevent deferred_split_scan() touching ->_refcount */
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002674 spin_lock(&pgdata->split_queue_lock);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002675 count = page_count(head);
2676 mapcount = total_mapcount(head);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002677 if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002678 if (!list_empty(page_deferred_list(head))) {
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002679 pgdata->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002680 list_del(page_deferred_list(head));
2681 }
Kirill A. Shutemov65c45372016-07-26 15:26:10 -07002682 if (mapping)
Mel Gorman11fb9982016-07-28 15:46:20 -07002683 __dec_node_page_state(page, NR_SHMEM_THPS);
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002684 spin_unlock(&pgdata->split_queue_lock);
2685 __split_huge_page(page, list, flags);
Huang Ying59807682017-09-06 16:22:34 -07002686 if (PageSwapCache(head)) {
2687 swp_entry_t entry = { .val = page_private(head) };
2688
2689 ret = split_swap_cluster(entry);
2690 } else
2691 ret = 0;
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002692 } else {
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002693 if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
2694 pr_alert("total_mapcount: %u, page_count(): %u\n",
2695 mapcount, count);
2696 if (PageTail(page))
2697 dump_page(head, NULL);
2698 dump_page(page, "total_mapcount(head) > 0");
2699 BUG();
2700 }
2701 spin_unlock(&pgdata->split_queue_lock);
2702fail: if (mapping)
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002703 xa_unlock(&mapping->i_pages);
Mel Gormana52633d2016-07-28 15:45:28 -07002704 spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
Kirill A. Shutemovfec89c12016-03-17 14:20:10 -07002705 unfreeze_page(head);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002706 ret = -EBUSY;
2707 }
2708
2709out_unlock:
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002710 if (anon_vma) {
2711 anon_vma_unlock_write(anon_vma);
2712 put_anon_vma(anon_vma);
2713 }
2714 if (mapping)
2715 i_mmap_unlock_read(mapping);
Kirill A. Shutemove9b61f12016-01-15 16:54:10 -08002716out:
2717 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2718 return ret;
2719}
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002720
2721void free_transhuge_page(struct page *page)
2722{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002723 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002724 unsigned long flags;
2725
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002726 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002727 if (!list_empty(page_deferred_list(page))) {
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002728 pgdata->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002729 list_del(page_deferred_list(page));
2730 }
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002731 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002732 free_compound_page(page);
2733}
2734
2735void deferred_split_huge_page(struct page *page)
2736{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002737 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002738 unsigned long flags;
2739
2740 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
2741
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002742 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002743 if (list_empty(page_deferred_list(page))) {
Kirill A. Shutemovf9719a02016-03-17 14:18:45 -07002744 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002745 list_add_tail(page_deferred_list(page), &pgdata->split_queue);
2746 pgdata->split_queue_len++;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002747 }
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002748 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002749}
2750
2751static unsigned long deferred_split_count(struct shrinker *shrink,
2752 struct shrink_control *sc)
2753{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002754 struct pglist_data *pgdata = NODE_DATA(sc->nid);
Mark Rutland6aa7de02017-10-23 14:07:29 -07002755 return READ_ONCE(pgdata->split_queue_len);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002756}
2757
2758static unsigned long deferred_split_scan(struct shrinker *shrink,
2759 struct shrink_control *sc)
2760{
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002761 struct pglist_data *pgdata = NODE_DATA(sc->nid);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002762 unsigned long flags;
2763 LIST_HEAD(list), *pos, *next;
2764 struct page *page;
2765 int split = 0;
2766
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002767 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002768 /* Take pin on all head pages to avoid freeing them under us */
Kirill A. Shutemovae026202016-02-05 15:36:53 -08002769 list_for_each_safe(pos, next, &pgdata->split_queue) {
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002770 page = list_entry((void *)pos, struct page, mapping);
2771 page = compound_head(page);
Kirill A. Shutemove3ae1952016-02-02 16:57:15 -08002772 if (get_page_unless_zero(page)) {
2773 list_move(page_deferred_list(page), &list);
2774 } else {
2775 /* We lost race with put_compound_page() */
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002776 list_del_init(page_deferred_list(page));
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002777 pgdata->split_queue_len--;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002778 }
Kirill A. Shutemove3ae1952016-02-02 16:57:15 -08002779 if (!--sc->nr_to_scan)
2780 break;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002781 }
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002782 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002783
2784 list_for_each_safe(pos, next, &list) {
2785 page = list_entry((void *)pos, struct page, mapping);
Kirill A. Shutemovfa41b902018-03-22 16:17:31 -07002786 if (!trylock_page(page))
2787 goto next;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002788 /* split_huge_page() removes page from list on success */
2789 if (!split_huge_page(page))
2790 split++;
2791 unlock_page(page);
Kirill A. Shutemovfa41b902018-03-22 16:17:31 -07002792next:
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002793 put_page(page);
2794 }
2795
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002796 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
2797 list_splice_tail(&list, &pgdata->split_queue);
2798 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002799
Kirill A. Shutemovcb8d68e2016-02-02 16:57:12 -08002800 /*
2801 * Stop shrinker if we didn't split any page, but the queue is empty.
2802 * This can happen if pages were freed under us.
2803 */
2804 if (!split && list_empty(&pgdata->split_queue))
2805 return SHRINK_STOP;
2806 return split;
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002807}
2808
2809static struct shrinker deferred_split_shrinker = {
2810 .count_objects = deferred_split_count,
2811 .scan_objects = deferred_split_scan,
2812 .seeks = DEFAULT_SEEKS,
Kirill A. Shutemova3d0a9182016-02-02 16:57:08 -08002813 .flags = SHRINKER_NUMA_AWARE,
Kirill A. Shutemov9a982252016-01-15 16:54:17 -08002814};
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002815
2816#ifdef CONFIG_DEBUG_FS
2817static int split_huge_pages_set(void *data, u64 val)
2818{
2819 struct zone *zone;
2820 struct page *page;
2821 unsigned long pfn, max_zone_pfn;
2822 unsigned long total = 0, split = 0;
2823
2824 if (val != 1)
2825 return -EINVAL;
2826
2827 for_each_populated_zone(zone) {
2828 max_zone_pfn = zone_end_pfn(zone);
2829 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2830 if (!pfn_valid(pfn))
2831 continue;
2832
2833 page = pfn_to_page(pfn);
2834 if (!get_page_unless_zero(page))
2835 continue;
2836
2837 if (zone != page_zone(page))
2838 goto next;
2839
Kirill A. Shutemovbaa355f2016-07-26 15:25:51 -07002840 if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002841 goto next;
2842
2843 total++;
2844 lock_page(page);
2845 if (!split_huge_page(page))
2846 split++;
2847 unlock_page(page);
2848next:
2849 put_page(page);
2850 }
2851 }
2852
Yang Shi145bdaa2016-05-05 16:22:00 -07002853 pr_info("%lu of %lu THP split\n", split, total);
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002854
2855 return 0;
2856}
2857DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
2858 "%llu\n");
2859
2860static int __init split_huge_pages_debugfs(void)
2861{
2862 void *ret;
2863
Yang Shi145bdaa2016-05-05 16:22:00 -07002864 ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
Kirill A. Shutemov49071d42016-01-15 16:54:40 -08002865 &split_huge_pages_fops);
2866 if (!ret)
2867 pr_warn("Failed to create split_huge_pages in debugfs");
2868 return 0;
2869}
2870late_initcall(split_huge_pages_debugfs);
2871#endif
Zi Yan616b8372017-09-08 16:10:57 -07002872
2873#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2874void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
2875 struct page *page)
2876{
2877 struct vm_area_struct *vma = pvmw->vma;
2878 struct mm_struct *mm = vma->vm_mm;
2879 unsigned long address = pvmw->address;
2880 pmd_t pmdval;
2881 swp_entry_t entry;
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07002882 pmd_t pmdswp;
Zi Yan616b8372017-09-08 16:10:57 -07002883
2884 if (!(pvmw->pmd && !pvmw->pte))
2885 return;
2886
2887 mmu_notifier_invalidate_range_start(mm, address,
2888 address + HPAGE_PMD_SIZE);
2889
2890 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
2891 pmdval = *pvmw->pmd;
2892 pmdp_invalidate(vma, address, pvmw->pmd);
2893 if (pmd_dirty(pmdval))
2894 set_page_dirty(page);
2895 entry = make_migration_entry(page, pmd_write(pmdval));
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07002896 pmdswp = swp_entry_to_pmd(entry);
2897 if (pmd_soft_dirty(pmdval))
2898 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
2899 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
Zi Yan616b8372017-09-08 16:10:57 -07002900 page_remove_rmap(page, true);
2901 put_page(page);
2902
2903 mmu_notifier_invalidate_range_end(mm, address,
2904 address + HPAGE_PMD_SIZE);
2905}
2906
2907void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
2908{
2909 struct vm_area_struct *vma = pvmw->vma;
2910 struct mm_struct *mm = vma->vm_mm;
2911 unsigned long address = pvmw->address;
2912 unsigned long mmun_start = address & HPAGE_PMD_MASK;
2913 pmd_t pmde;
2914 swp_entry_t entry;
2915
2916 if (!(pvmw->pmd && !pvmw->pte))
2917 return;
2918
2919 entry = pmd_to_swp_entry(*pvmw->pmd);
2920 get_page(new);
2921 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
Naoya Horiguchiab6e3d02017-09-08 16:11:04 -07002922 if (pmd_swp_soft_dirty(*pvmw->pmd))
2923 pmde = pmd_mksoft_dirty(pmde);
Zi Yan616b8372017-09-08 16:10:57 -07002924 if (is_write_migration_entry(entry))
Linus Torvaldsf55e1012017-11-29 09:01:01 -08002925 pmde = maybe_pmd_mkwrite(pmde, vma);
Zi Yan616b8372017-09-08 16:10:57 -07002926
2927 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
Naoya Horiguchie71769a2018-04-20 14:55:45 -07002928 if (PageAnon(new))
2929 page_add_anon_rmap(new, vma, mmun_start, true);
2930 else
2931 page_add_file_rmap(new, true);
Zi Yan616b8372017-09-08 16:10:57 -07002932 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
2933 if (vma->vm_flags & VM_LOCKED)
2934 mlock_vma_page(new);
2935 update_mmu_cache_pmd(vma, address, pvmw->pmd);
2936}
2937#endif