/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
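/*
 * For example, with existing regions [0,2) and [5,7), region_chg(head, 1, 6)
 * returns 3 (only offsets 2, 3 and 4 are not yet covered), and a subsequent
 * region_add(head, 1, 6) merges everything into the single region [0,7).
 * region_count(head, 2, 5) on the result then reports 3 overlapping pages.
 */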
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher than ours, extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		/* long, not int: region offsets are longs and can overflow
		 * an int on large mappings */
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
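/*
 * For example, with 2 MB huge pages (huge_page_shift == 21), an address
 * 4 MB past vm_start in a mapping with vm_pgoff == 0 yields an offset
 * of 2 huge pages into the file.
 */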

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
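/*
 * For example, an hstate of order 9 on an architecture with 4 KB base
 * pages (PAGE_SHIFT == 12) gives 1UL << 21, i.e. a 2 MB huge page.
 */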

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
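/*
 * Since a kmalloc'd struct resv_map is at least word-aligned, bits 0 and 1
 * of its address are guaranteed zero; vm_private_data therefore holds
 * (map pointer | flags), unpacked below by masking with HPAGE_RESV_MASK.
 */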

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, sz);
		return;
	}

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_gigantic_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src, addr, vma);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}

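/*
 * Test whether a page is a hugetlb page.  Compound pages allocated by
 * this file are identified by their free_huge_page() destructor.
 */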
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}
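/*
 * For example, with nodes_allowed = {0,2}: next_node_allowed(0, ...)
 * returns 2, and next_node_allowed(2, ...) wraps around to 0.
 */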

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase quota before an allocation can
 * occur.  Where any new reservation would be required the reservation
 * change is prepared, but not committed.  Once the page has been
 * quota'd, allocated and instantiated, the change should be committed
 * via vma_commit_reservation().  No action is required on failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
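
/*
 * The two halves are paired: alloc_huge_page() below calls
 * vma_needs_reservation() before taking a page from the pool and
 * vma_commit_reservation() once the page is established.
 */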
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves
	 * and will not have accounted against quota.  Check that a quota
	 * charge can be made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_HIGH_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0)  {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
1318
#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

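/*
 * Map a sysfs kobject back to its hstate: check the global hstate kobjects
 * first, then fall back to the per-node ones.  *nidp is set to NUMA_NO_NODE
 * for a global attribute, or to the node id for a per-node attribute.
 */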
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}
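
/*
 * Common store handler for nr_hugepages and nr_hugepages_mempolicy: parse
 * the requested pool size and resize the pool, constrained to the task's
 * mempolicy nodes when @obey_mempolicy is true, or to a single node when
 * written via a per-node attribute.
 */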
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
			struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t len)
{
	int err;
	int nid;
	unsigned long count;
	struct hstate *h;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	err = strict_strtoul(buf, 10, &count);
	if (err) {
		NODEMASK_FREE(nodes_allowed);
		return err;
	}

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_HIGH_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

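/*
 * Create a "hugepages-<size>kB" kobject under @parent and populate it with
 * @hstate_attr_group; shared by the global sysfs directory and the
 * per-node directories.
 */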
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = h - hstates;

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

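/* Register the global hugepages directory under /sys/kernel/mm */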
static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					hstate_kobjs, &hstate_attr_group);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
								h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node sysdevs in node_devices[] using a parallel array.  The array
 * index of a node sysdev or _hstate == node id.
 * This is here to avoid any static dependency of the node sysdev driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node sysdevs
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node sysdev.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h)
		if (nhs->hstate_kobjs[h - hstates]) {
			kobject_put(nhs->hstate_kobjs[h - hstates]);
			nhs->hstate_kobjs[h - hstates] = NULL;
		}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node sysdevs
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node sysdev registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(&node_devices[nid]);
}

/*
 * Register hstate attributes for a single node sysdev.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->sysdev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
					" for node %d\n",
						h->name, node->sysdev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * sysdevs of nodes that have memory.  All on-line nodes should have
 * registered their associated sysdev by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		struct node *node = &node_devices[nid];
		if (node->sysdev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node sysdev driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

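/*
 * Module init: set up the default hstate, gather any huge pages
 * preallocated from bootmem, and register the sysfs and per-node
 * attributes.
 */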
static int __init hugetlb_init(void)
{
	/* Some platforms decide whether they support huge pages at boot
	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	hugetlb_register_all_nodes();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

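/*
 * Handle the "hugepages=N" boot option.  It applies to the hstate of the
 * most recent "hugepagesz=" option, or to the default hstate if none has
 * been parsed yet, e.g. (example values): hugepagesz=2M hugepages=512
 */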
static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

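/*
 * Handle the "default_hugepagesz=" boot option; memparse() accepts the
 * usual K/M/G suffixes.  The matching hstate is looked up later, in
 * hugetlb_init().
 */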
static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);

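/* Sum a per-node counter array over the current task's cpuset nodes */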
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
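/*
 * Common handler for the nr_hugepages and nr_hugepages_mempolicy sysctls;
 * the sysctl counterpart of nr_hugepages_store_common() above.
 */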
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->max_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
						GFP_KERNEL | __GFP_NORETRY);
		if (!(obey_mempolicy &&
			       init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);

		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
			NODEMASK_FREE(nodes_allowed);
	}

	return 0;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->nr_overcommit_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}

	return 0;
}

#endif /* CONFIG_SYSCTL */

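/* Back the HugePages_* fields of /proc/meminfo; default hstate only */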
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total:   %5lu\n"
			"HugePages_Free:    %5lu\n"
			"HugePages_Rsvd:    %5lu\n"
			"HugePages_Surp:    %5lu\n"
			"Hugepagesize:   %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp: %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h = &default_hstate;
	return h->nr_huge_pages * pages_per_huge_page(h);
}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpuset
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still be OOM'ed by the
	 * kernel for lack of free hugetlb pages in the cpuset that the task
	 * is in. Enforcing strict accounting with cpuset is almost impossible
	 * (or too ugly) because cpuset is too fluid: tasks and memory nodes
	 * can be moved dynamically between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};

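/* Build a huge pte for @page with protections derived from @vma */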
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, ptep);
	}
}

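/*
 * Copy the huge page table entries of @vma from @src to @dst at fork().
 * For private (copy-on-write) mappings the source ptes are write-protected
 * so that both parent and child will fault on the next write.
 */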
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			__unmap_hugepage_range(iter_vma,
				address, address + huge_page_size(h),
				page);
	}
	spin_unlock(&mapping->i_mmap_lock);

	return 1;
}

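/*
 * Handle a copy-on-write fault on a huge pte: allocate a new huge page,
 * copy the old contents and switch the mapping over, re-validating the
 * pte whenever page_table_lock has been dropped.
 */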
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);

	/* Drop page_table_lock as buddy allocator may be called */
	spin_unlock(&mm->page_table_lock);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				spin_lock(&mm->page_table_lock);
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		/* Caller expects lock to be held */
		spin_lock(&mm->page_table_lock);
		return -PTR_ERR(new_page);
	}

	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	/*
	 * Retake the page_table_lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(&mm->page_table_lock);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

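/*
 * Fault in a huge page with no pte yet: find or allocate the backing page,
 * adding it to the page cache for shared mappings, then install the new
 * pte under page_table_lock.
 */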
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else {
			lock_page(page);
			page->mapping = HUGETLB_POISON;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

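/*
 * Top-level hugetlb fault handler, called from handle_mm_fault() for
 * hugetlb VMAs.  Serialized by hugetlb_instantiation_mutex so that racing
 * faults on the same page do not cause spurious allocation failures.
 */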
Adam Litke86e52162006-01-06 00:10:43 -08002500int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
Hugh Dickins788c7df2009-06-23 13:49:05 +01002501 unsigned long address, unsigned int flags)
Adam Litke86e52162006-01-06 00:10:43 -08002502{
2503 pte_t *ptep;
2504 pte_t entry;
David Gibson1e8f8892006-01-06 00:10:44 -08002505 int ret;
Andy Whitcroft57303d82008-08-12 15:08:47 -07002506 struct page *pagecache_page = NULL;
David Gibson3935baa2006-03-22 00:08:53 -08002507 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
Andi Kleena5516432008-07-23 21:27:41 -07002508 struct hstate *h = hstate_vma(vma);
Adam Litke86e52162006-01-06 00:10:43 -08002509
Andi Kleena5516432008-07-23 21:27:41 -07002510 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
Adam Litke86e52162006-01-06 00:10:43 -08002511 if (!ptep)
2512 return VM_FAULT_OOM;
2513
David Gibson3935baa2006-03-22 00:08:53 -08002514 /*
2515 * Serialize hugepage allocation and instantiation, so that we don't
2516 * get spurious allocation failures if two CPUs race to instantiate
2517 * the same page in the page cache.
2518 */
2519 mutex_lock(&hugetlb_instantiation_mutex);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002520 entry = huge_ptep_get(ptep);
2521 if (huge_pte_none(entry)) {
Hugh Dickins788c7df2009-06-23 13:49:05 +01002522 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
David Gibsonb4d1d992008-10-15 22:01:11 -07002523 goto out_mutex;
David Gibson3935baa2006-03-22 00:08:53 -08002524 }
Adam Litke86e52162006-01-06 00:10:43 -08002525
Nick Piggin83c54072007-07-19 01:47:05 -07002526 ret = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08002527
Andy Whitcroft57303d82008-08-12 15:08:47 -07002528 /*
2529 * If we are going to COW the mapping later, we examine the pending
2530 * reservations for this page now. This will ensure that any
2531 * allocations necessary to record that reservation occur outside the
2532 * spinlock. For private mappings, we also lookup the pagecache
2533 * page now as it is used to determine if a reservation has been
2534 * consumed.
2535 */
Hugh Dickins788c7df2009-06-23 13:49:05 +01002536 if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
Andy Whitcroft2b267362008-08-12 15:08:49 -07002537 if (vma_needs_reservation(h, vma, address) < 0) {
2538 ret = VM_FAULT_OOM;
David Gibsonb4d1d992008-10-15 22:01:11 -07002539 goto out_mutex;
Andy Whitcroft2b267362008-08-12 15:08:49 -07002540 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07002541
Mel Gormanf83a2752009-05-28 14:34:40 -07002542 if (!(vma->vm_flags & VM_MAYSHARE))
Andy Whitcroft57303d82008-08-12 15:08:47 -07002543 pagecache_page = hugetlbfs_pagecache_page(h,
2544 vma, address);
2545 }
2546
	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
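
/*
 * For context (a sketch, paraphrasing handle_mm_fault() in mm/memory.c):
 * the generic fault handler diverts hugetlb VMAs to the function above
 * instead of walking normal ptes, roughly:
 *
 *	if (unlikely(is_vm_hugetlb_page(vma)))
 *		return hugetlb_fault(mm, vma, address, flags);
 */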

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}

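/*
 * x86, for instance, supplies a real follow_huge_pud() for 1GB pages
 * (arch/x86/mm/hugetlbpage.c), overriding the weak stub above.
 */

/*
 * follow_hugetlb_page() implements the get_user_pages() walk for hugetlb
 * VMAs: for each base-page-sized slot it looks up (faulting in if
 * necessary) the hugepage backing the address and records the subpage
 * in pages[] and the VMA in vmas[].
 */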
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it.  This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			remainder = 0;
			break;
		}

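		/*
		 * No pte, an empty pte, or a read-only pte for a write
		 * access: fault the page in and then retry this slot.
		 * Any VM_FAULT_ERROR return aborts the whole walk.
		 */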
		if (absent ||
		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
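
		/*
		 * Example: with 4KB base pages and a 2MB hugepage,
		 * pages_per_huge_page() is 512, so the same_page loop
		 * below can fill up to 512 result slots from a single
		 * page-table walk.
		 */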
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
		    pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}

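/*
 * Rewrite the protection bits on every present hugepage pte in
 * [address, end).  The mapping's i_mmap_lock is held across the walk
 * because huge_pmd_unshare() below may drop a shared pmd page; the TLB
 * is flushed once for the whole range afterwards.
 */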
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

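/*
 * Reserve hugepages for the range [from, to) before any faults occur,
 * so that later faults cannot fail for lack of hugepages.  Worked
 * example: a private read-only mapping of 8 hugepages still reserves
 * all 8 (chg = to - from), because mprotect() may later make it
 * writable; a shared mapping reserves only pages not already covered
 * by an earlier mapping of the same file range (via region_chg()).
 */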
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					int acctflag)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	/*
	 * Only apply hugepage reservation if asked.  At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * and filesystem quota without using reserves.
	 */
	if (acctflag & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file.  Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write.  Assume !vma is a shm
	 * mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	/* There must be enough filesystem quota for the mapping. */
	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand back the quota if there are not.
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}

	/*
	 * Account for the reservations made.  Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.  Private mappings are per-VMA and
	 * only the consumed reservations are tracked.  When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map.  Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}
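
/*
 * Illustrative caller (a sketch only; cf. hugetlbfs_file_mmap() in
 * fs/hugetlbfs/inode.c):
 *
 *	if (hugetlb_reserve_pages(inode,
 *				  vma->vm_pgoff >> huge_page_order(h),
 *				  len >> huge_page_shift(h), vma,
 *				  vma->vm_flags))
 *		goto out;
 */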

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
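	/*
	 * "chg" is how many pages past "offset" the region map recorded as
	 * reserved; "freed" is how many of those the caller actually found
	 * in the page cache and freed.  The difference is the unconsumed
	 * reservation, whose quota and hugepage count are handed back below.
	 */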

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}