/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation mutex:
 *
 *      down_write(&mm->mmap_sem);
 * or
 *      down_read(&mm->mmap_sem);
 *      mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

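/*
 * Add the region [f, t) to the reserve map, merging it with any existing
 * regions it overlaps.  A prior matching region_chg() call guarantees that
 * an entry to reuse already exists, so no allocation is needed and the
 * function always returns 0.
 */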
static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

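/*
 * Return the number of huge pages in [f, t) that are not yet covered by
 * the reserve map, i.e. the additional reservation required.  Where needed,
 * a zero-size placeholder region is allocated so that a following
 * region_add() is guaranteed to succeed without allocating memory.
 * Returns -ENOMEM if that placeholder cannot be allocated.
 */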
static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

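/*
 * Truncate the reserve map at 'end', trimming any region that straddles
 * that offset and dropping every region beyond it.  Returns the number of
 * pages of reservation released.
 */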
static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

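/*
 * Count the number of pages in [f, t) that are covered by existing regions
 * in the reserve map.
 */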
static long region_count(struct list_head *head, long f, long t)
{
        struct file_region *rg;
        long chg = 0;

        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                int seg_from;
                int seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }

        return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << (hstate->order + PAGE_SHIFT);
}

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map {
        struct kref refs;
        struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        if (!resv_map)
                return NULL;

        kref_init(&resv_map->refs);
        INIT_LIST_HEAD(&resv_map->regions);

        return resv_map;
}

static void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

        /* Clear out any active regions before we release the map. */
        region_truncate(&resv_map->regions, 0);
        kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
                        struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NORESERVE)
                return;

        if (vma->vm_flags & VM_MAYSHARE) {
                /* Shared mappings always use reserves */
                h->resv_huge_pages--;
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Only the process that called mmap() has reserves for
                 * private mappings.
                 */
                h->resv_huge_pages--;
        }
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_MAYSHARE)
                return 1;
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;
        return 0;
}

static void clear_gigantic_page(struct page *page,
                        unsigned long addr, unsigned long sz)
{
        int i;
        struct page *p = page;

        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
                cond_resched();
                clear_user_highpage(p, addr + i * PAGE_SIZE);
        }
}
static void clear_huge_page(struct page *page,
                        unsigned long addr, unsigned long sz)
{
        int i;

        if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
                clear_gigantic_page(page, addr, sz);
                return;
        }

        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++) {
                cond_resched();
                clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
}

static void copy_gigantic_page(struct page *dst, struct page *src,
                        unsigned long addr, struct vm_area_struct *vma)
{
        int i;
        struct hstate *h = hstate_vma(vma);
        struct page *dst_base = dst;
        struct page *src_base = src;
        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); ) {
                cond_resched();
                copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
}
static void copy_huge_page(struct page *dst, struct page *src,
                        unsigned long addr, struct vm_area_struct *vma)
{
        int i;
        struct hstate *h = hstate_vma(vma);

        if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
                copy_gigantic_page(dst, src, addr, vma);
                return;
        }

        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct hstate *h)
{
        int nid;
        struct page *page = NULL;

        for (nid = 0; nid < MAX_NUMNODES; ++nid) {
                if (!list_empty(&h->hugepage_freelists[nid])) {
                        page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[nid]--;
                        break;
                }
        }
        return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
{
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        struct zone *zone;
        struct zoneref *z;

        /*
         * A child process with MAP_PRIVATE mappings created by its parent
         * has no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                nid = zone_to_nid(zone);
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&h->hugepage_freelists[nid])) {
                        page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[nid]--;

                        if (!avoid_reserve)
                                decrement_hugepage_resv_vma(h, vma);

                        break;
                }
        }
        mpol_cond_put(mpol);
        return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        VM_BUG_ON(h->order >= MAX_ORDER);

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
        __free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

static void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct address_space *mapping;

        mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
        if (mapping)
                hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
        put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
                p->first_page = page;
        }
}

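/*
 * Test whether a page belongs to hugetlb.  A hugetlb page is identified by
 * its compound page destructor being free_huge_page.
 */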
int PageHuge(struct page *page)
{
        compound_page_dtor *dtor;

        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        dtor = get_compound_page_dtor(page);

        return dtor == free_huge_page;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        if (h->order >= MAX_ORDER)
                return NULL;

        page = alloc_pages_exact_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
                        __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

/*
 * Use a helper variable to find the next node and then
 * copy it back to hugetlb_next_nid afterwards:
 * otherwise there's a window in which a racer might
 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
 * But we don't need to use a spin_lock here: it really
 * doesn't matter if occasionally a racer chooses the
 * same nid as we do.  Move nid forward in the mask even
 * if we just successfully allocated a hugepage so that
 * the next caller gets hugepages on the next node.
 */
static int hstate_next_node(struct hstate *h)
{
        int next_nid;
        next_nid = next_node(h->hugetlb_next_nid, node_online_map);
        if (next_nid == MAX_NUMNODES)
                next_nid = first_node(node_online_map);
        h->hugetlb_next_nid = next_nid;
        return next_nid;
}

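/*
 * Allocate one fresh huge page from the buddy allocator, trying each
 * online node in turn starting at h->hugetlb_next_nid.  Returns 1 on
 * success and 0 if no node could satisfy the allocation.
 */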
static int alloc_fresh_huge_page(struct hstate *h)
{
        struct page *page;
        int start_nid;
        int next_nid;
        int ret = 0;

        start_nid = h->hugetlb_next_nid;

        do {
                page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
                if (page)
                        ret = 1;
                next_nid = hstate_next_node(h);
        } while (!page && h->hugetlb_next_nid != start_nid);

        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
        else
                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

        return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        struct page *page;
        unsigned int nid;

        if (h->order >= MAX_ORDER)
                return NULL;

        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         * overcommit
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter, until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
                h->nr_huge_pages++;
                h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);

        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
                                        __GFP_REPEAT|__GFP_NOWARN,
                                        huge_page_order(h));

        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
                return NULL;
        }

        spin_lock(&hugetlb_lock);
        if (page) {
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON(page_count(page));
                nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
                 * We incremented the global counters already
                 */
                h->nr_huge_pages_node[nid]++;
                h->surplus_huge_pages_node[nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
                h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);

        return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;

        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
                h->resv_huge_pages += delta;
                return 0;
        }

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NULL, 0);
                if (!page) {
                        /*
                         * We were not able to allocate enough pages to
                         * satisfy the entire reservation so we free what
                         * we've allocated so far.
                         */
                        spin_lock(&hugetlb_lock);
                        needed = 0;
                        goto free;
                }

                list_add(&page->lru, &surplus_list);
        }
        allocated += needed;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0)
                goto retry;

        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;
free:
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                list_del(&page->lru);
                enqueue_huge_page(h, page);
        }

        /* Free unnecessary surplus pages to the buddy allocator */
        if (!list_empty(&surplus_list)) {
                spin_unlock(&hugetlb_lock);
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                        list_del(&page->lru);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
                         * put_page.  This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
                        free_huge_page(page);
                }
                spin_lock(&hugetlb_lock);
        }

        return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
{
        static int nid = -1;
        struct page *page;
        unsigned long nr_pages;

        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes. Iterate across all nodes until we
         * can no longer free unreserved surplus pages. This occurs when
         * the nodes with surplus pages have no free pages.
         */
        unsigned long remaining_iterations = nr_online_nodes;

        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;

        /* Cannot return gigantic pages currently */
        if (h->order >= MAX_ORDER)
                return;

        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

        while (remaining_iterations-- && nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                if (!h->surplus_huge_pages_node[nid])
                        continue;

                if (!list_empty(&h->hugepage_freelists[nid])) {
                        page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        update_and_free_page(h, page);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[nid]--;
                        h->surplus_huge_pages--;
                        h->surplus_huge_pages_node[nid]--;
                        nr_pages--;
                        remaining_iterations = nr_online_nodes;
                }
        }
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase quota before an allocation can occur.
 * Where any new reservation would be required the reservation change is
 * prepared, but not committed.  Once the page has been quota'd, allocated
 * and instantiated, the change should be committed via
 * vma_commit_reservation().  No action is required on failure.
 */
static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                        idx, idx + 1);

        } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                return 1;

        } else {
                long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                err = region_chg(&reservations->regions, idx, idx + 1);
                if (err < 0)
                        return err;
                return 0;
        }
}
static void vma_commit_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);

        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                /* Mark this page used in the map. */
                region_add(&reservations->regions, idx, idx + 1);
        }
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
{
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        long chg;

        /*
         * Processes that did not create the mapping will have no reserves and
         * will not have accounted against quota. Check that the quota can be
         * made before satisfying the allocation.
         * MAP_NORESERVE mappings may also need pages and quota allocated
         * if no reserve mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(chg);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
                        return ERR_PTR(-ENOSPC);

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
        spin_unlock(&hugetlb_lock);

        if (!page) {
                page = alloc_buddy_huge_page(h, vma, addr);
                if (!page) {
                        hugetlb_put_quota(inode->i_mapping, chg);
                        return ERR_PTR(-VM_FAULT_OOM);
                }
        }

        set_page_refcounted(page);
        set_page_private(page, (unsigned long) mapping);

        vma_commit_reservation(h, vma, addr);

        return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
        struct huge_bootmem_page *m;
        int nr_nodes = nodes_weight(node_online_map);

        while (nr_nodes) {
                void *addr;

                addr = __alloc_bootmem_node_nopanic(
                                NODE_DATA(h->hugetlb_next_nid),
                                huge_page_size(h), huge_page_size(h), 0);

                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
                         * huge_bootmem_page struct (until gather_bootmem
                         * puts them into the mem_map).
                         */
                        m = addr;
                        goto found;
                }
                hstate_next_node(h);
                nr_nodes--;
        }
        return 0;

found:
        BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
        /* Put them into a private list first because mem_map is not up yet */
        list_add(&m->list, &huge_boot_pages);
        m->hstate = h;
        return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
        if (unlikely(order > (MAX_ORDER - 1)))
                prep_compound_gigantic_page(page, order);
        else
                prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
        struct huge_bootmem_page *m;

        list_for_each_entry(m, &huge_boot_pages, list) {
                struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
        }
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
        unsigned long i;

        for (i = 0; i < h->max_huge_pages; ++i) {
                if (h->order >= MAX_ORDER) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_fresh_huge_page(h))
                        break;
        }
        h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /* oversize hugepages were init'ed in early boot */
                if (h->order < MAX_ORDER)
                        hugetlb_hstate_alloc_pages(h);
        }
}

static char * __init memfmt(char *buf, unsigned long n)
{
        if (n >= (1UL << 30))
                sprintf(buf, "%lu GB", n >> 30);
        else if (n >= (1UL << 20))
                sprintf(buf, "%lu MB", n >> 20);
        else
                sprintf(buf, "%lu KB", n >> 10);
        return buf;
}

static void __init report_hugepages(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                char buf[32];
                printk(KERN_INFO "HugeTLB registered %s page size, "
                                 "pre-allocated %ld pages\n",
                                 memfmt(buf, huge_page_size(h)),
                                 h->free_huge_pages);
        }
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
        int i;

        if (h->order >= MAX_ORDER)
                return;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
                list_for_each_entry_safe(page, next, freel, lru) {
                        if (count >= h->nr_huge_pages)
                                return;
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(h, page);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[page_to_nid(page)]--;
                }
        }
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, int delta)
{
        static int prev_nid;
        int nid = prev_nid;
        int ret = 0;

        VM_BUG_ON(delta != -1 && delta != 1);
        do {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);

                /* To shrink on this node, there must be a surplus page */
                if (delta < 0 && !h->surplus_huge_pages_node[nid])
                        continue;
                /* Surplus cannot exceed the total number of pages */
                if (delta > 0 && h->surplus_huge_pages_node[nid] >=
                                                h->nr_huge_pages_node[nid])
                        continue;

                h->surplus_huge_pages += delta;
                h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (nid != prev_nid);

        prev_nid = nid;
        return ret;
}

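/*
 * persistent_huge_pages() is the number of huge pages in the pool that are
 * not tracked as surplus, i.e. the pages that will remain once any surplus
 * pages have been returned to the buddy allocator.
 */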
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
        unsigned long min_count, ret;

        if (h->order >= MAX_ORDER)
                return h->max_huge_pages;

        /*
         * Increase the pool size
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         *
         * We might race with alloc_buddy_huge_page() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though, it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
         * within all the constraints specified by the sysctls.
         */
        spin_lock(&hugetlb_lock);
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, -1))
                        break;
        }

        while (count > persistent_huge_pages(h)) {
                /*
                 * If this allocation races such that we no longer need the
                 * page, free_huge_page will handle it by freeing the page
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
                ret = alloc_fresh_huge_page(h);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;

        }

        /*
         * Decrease the pool size
         * First return free pages to the buddy allocator (being careful
         * to keep enough around to satisfy reservations).  Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         *
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
         * alloc_buddy_huge_page() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls are changed, or the surplus pages go out of use.
         */
        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
        min_count = max(count, min_count);
        try_to_free_low(h, min_count);
        while (min_count < persistent_huge_pages(h)) {
                struct page *page = dequeue_huge_page(h);
                if (!page)
                        break;
                update_and_free_page(h, page);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, 1))
                        break;
        }
out:
        ret = persistent_huge_pages(h);
        spin_unlock(&hugetlb_lock);
        return ret;
}

#define HSTATE_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
        static struct kobj_attribute _name##_attr = \
                __ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

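/* Map a hugepages sysfs kobject back to the hstate it represents. */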
1254static struct hstate *kobj_to_hstate(struct kobject *kobj)
1255{
1256 int i;
1257 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1258 if (hstate_kobjs[i] == kobj)
1259 return &hstates[i];
1260 BUG();
1261 return NULL;
1262}
1263
1264static ssize_t nr_hugepages_show(struct kobject *kobj,
1265 struct kobj_attribute *attr, char *buf)
1266{
1267 struct hstate *h = kobj_to_hstate(kobj);
1268 return sprintf(buf, "%lu\n", h->nr_huge_pages);
1269}
1270static ssize_t nr_hugepages_store(struct kobject *kobj,
1271 struct kobj_attribute *attr, const char *buf, size_t count)
1272{
1273 int err;
1274 unsigned long input;
1275 struct hstate *h = kobj_to_hstate(kobj);
1276
1277 err = strict_strtoul(buf, 10, &input);
1278 if (err)
1279 return 0;
1280
1281 h->max_huge_pages = set_max_huge_pages(h, input);
1282
1283 return count;
1284}
1285HSTATE_ATTR(nr_hugepages);
1286
1287static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1288 struct kobj_attribute *attr, char *buf)
1289{
1290 struct hstate *h = kobj_to_hstate(kobj);
1291 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1292}
1293static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1294 struct kobj_attribute *attr, const char *buf, size_t count)
1295{
1296 int err;
1297 unsigned long input;
1298 struct hstate *h = kobj_to_hstate(kobj);
1299
1300 err = strict_strtoul(buf, 10, &input);
1301 if (err)
1302 return 0;
1303
1304 spin_lock(&hugetlb_lock);
1305 h->nr_overcommit_huge_pages = input;
1306 spin_unlock(&hugetlb_lock);
1307
1308 return count;
1309}
1310HSTATE_ATTR(nr_overcommit_hugepages);
1311
1312static ssize_t free_hugepages_show(struct kobject *kobj,
1313 struct kobj_attribute *attr, char *buf)
1314{
1315 struct hstate *h = kobj_to_hstate(kobj);
1316 return sprintf(buf, "%lu\n", h->free_huge_pages);
1317}
1318HSTATE_ATTR_RO(free_hugepages);
1319
1320static ssize_t resv_hugepages_show(struct kobject *kobj,
1321 struct kobj_attribute *attr, char *buf)
1322{
1323 struct hstate *h = kobj_to_hstate(kobj);
1324 return sprintf(buf, "%lu\n", h->resv_huge_pages);
1325}
1326HSTATE_ATTR_RO(resv_hugepages);
1327
1328static ssize_t surplus_hugepages_show(struct kobject *kobj,
1329 struct kobj_attribute *attr, char *buf)
1330{
1331 struct hstate *h = kobj_to_hstate(kobj);
1332 return sprintf(buf, "%lu\n", h->surplus_huge_pages);
1333}
1334HSTATE_ATTR_RO(surplus_hugepages);
1335
1336static struct attribute *hstate_attrs[] = {
1337 &nr_hugepages_attr.attr,
1338 &nr_overcommit_hugepages_attr.attr,
1339 &free_hugepages_attr.attr,
1340 &resv_hugepages_attr.attr,
1341 &surplus_hugepages_attr.attr,
1342 NULL,
1343};
1344
1345static struct attribute_group hstate_attr_group = {
1346 .attrs = hstate_attrs,
1347};
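/*
 * Editor's note: the attributes above are exposed per hstate by
 * hugetlb_sysfs_add_hstate() below; assuming mm_kobj is the usual
 * /sys/kernel/mm directory, a 2 MB hstate can be driven like this
 * (illustrative shell session):
 *
 *	# echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	64
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *	0
 */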
1348
1349static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
1350{
1351 int retval;
1352
1353 hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
1354 hugepages_kobj);
1355 if (!hstate_kobjs[h - hstates])
1356 return -ENOMEM;
1357
1358 retval = sysfs_create_group(hstate_kobjs[h - hstates],
1359 &hstate_attr_group);
1360 if (retval)
1361 kobject_put(hstate_kobjs[h - hstates]);
1362
1363 return retval;
1364}
1365
1366static void __init hugetlb_sysfs_init(void)
1367{
1368 struct hstate *h;
1369 int err;
1370
1371 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1372 if (!hugepages_kobj)
1373 return;
1374
1375 for_each_hstate(h) {
1376 err = hugetlb_sysfs_add_hstate(h);
1377 if (err)
1378			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1379 h->name);
1380 }
1381}
1382
1383static void __exit hugetlb_exit(void)
1384{
1385 struct hstate *h;
1386
1387 for_each_hstate(h) {
1388 kobject_put(hstate_kobjs[h - hstates]);
1389 }
1390
1391 kobject_put(hugepages_kobj);
1392}
1393module_exit(hugetlb_exit);
1394
1395static int __init hugetlb_init(void)
1396{
Benjamin Herrenschmidt0ef89d22008-07-31 00:07:30 -07001397	/* Some platforms decide whether they support huge pages at boot
1398	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1399	 * there is no such support.
1400 */
1401 if (HPAGE_SHIFT == 0)
1402 return 0;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001403
Nick Piggine11bfbf2008-07-23 21:27:52 -07001404 if (!size_to_hstate(default_hstate_size)) {
1405 default_hstate_size = HPAGE_SIZE;
1406 if (!size_to_hstate(default_hstate_size))
1407 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001408 }
Nick Piggine11bfbf2008-07-23 21:27:52 -07001409 default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1410 if (default_hstate_max_huge_pages)
1411 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001412
1413 hugetlb_init_hstates();
1414
Andi Kleenaa888a72008-07-23 21:27:47 -07001415 gather_bootmem_prealloc();
1416
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001417 report_hugepages();
1418
1419 hugetlb_sysfs_init();
1420
1421 return 0;
1422}
1423module_init(hugetlb_init);
1424
1425/* Should be called on processing a hugepagesz=... option */
1426void __init hugetlb_add_hstate(unsigned order)
1427{
1428 struct hstate *h;
Andi Kleen8faa8b02008-07-23 21:27:48 -07001429 unsigned long i;
1430
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001431 if (size_to_hstate(PAGE_SIZE << order)) {
1432 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1433 return;
1434 }
1435 BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1436 BUG_ON(order == 0);
1437 h = &hstates[max_hstate++];
1438 h->order = order;
1439 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
Andi Kleen8faa8b02008-07-23 21:27:48 -07001440 h->nr_huge_pages = 0;
1441 h->free_huge_pages = 0;
1442 for (i = 0; i < MAX_NUMNODES; ++i)
1443 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1444 h->hugetlb_next_nid = first_node(node_online_map);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001445 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1446 huge_page_size(h)/1024);
Andi Kleen8faa8b02008-07-23 21:27:48 -07001447
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001448 parsed_hstate = h;
1449}
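/*
 * Editor's sketch of how an architecture is expected to use this: its
 * hugepagesz= handler calls hugetlb_add_hstate() with the page order for
 * the requested size.  Assuming a 4 kB base page, hugetlb_add_hstate(9)
 * registers a 2 MB hstate named "hugepages-2048kB", while
 * hugetlb_add_hstate(18) registers a 1 GB hstate named
 * "hugepages-1048576kB".
 */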
1450
Nick Piggine11bfbf2008-07-23 21:27:52 -07001451static int __init hugetlb_nrpages_setup(char *s)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001452{
1453 unsigned long *mhp;
Andi Kleen8faa8b02008-07-23 21:27:48 -07001454 static unsigned long *last_mhp;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001455
1456 /*
1457 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1458 * so this hugepages= parameter goes to the "default hstate".
1459 */
1460 if (!max_hstate)
1461 mhp = &default_hstate_max_huge_pages;
1462 else
1463 mhp = &parsed_hstate->max_huge_pages;
1464
Andi Kleen8faa8b02008-07-23 21:27:48 -07001465 if (mhp == last_mhp) {
1466 printk(KERN_WARNING "hugepages= specified twice without "
1467 "interleaving hugepagesz=, ignoring\n");
1468 return 1;
1469 }
1470
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001471 if (sscanf(s, "%lu", mhp) <= 0)
1472 *mhp = 0;
1473
Andi Kleen8faa8b02008-07-23 21:27:48 -07001474 /*
1475 * Global state is always initialized later in hugetlb_init.
1476 * But we need to allocate >= MAX_ORDER hstates here early to still
1477 * use the bootmem allocator.
1478 */
1479 if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1480 hugetlb_hstate_alloc_pages(parsed_hstate);
1481
1482 last_mhp = mhp;
1483
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001484 return 1;
1485}
Nick Piggine11bfbf2008-07-23 21:27:52 -07001486__setup("hugepages=", hugetlb_nrpages_setup);
1487
1488static int __init hugetlb_default_setup(char *s)
1489{
1490 default_hstate_size = memparse(s, &s);
1491 return 1;
1492}
1493__setup("default_hugepagesz=", hugetlb_default_setup);
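/*
 * Editor's example of how the boot parameters handled above (hugepages=,
 * default_hugepagesz=) combine with an architecture's hugepagesz= option
 * (sizes assume an arch supporting 2 MB and 1 GB pages):
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512 default_hugepagesz=1G
 *
 * Each hugepages= applies to the hugepagesz= that precedes it; a plain
 * hugepages= given before any hugepagesz= sizes the default hstate, and
 * default_hugepagesz= selects which hstate becomes the default.
 */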
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001494
Nishanth Aravamudan8a213462008-07-25 19:44:37 -07001495static unsigned int cpuset_mems_nr(unsigned int *array)
1496{
1497 int node;
1498 unsigned int nr = 0;
1499
1500 for_each_node_mask(node, cpuset_current_mems_allowed)
1501 nr += array[node];
1502
1503 return nr;
1504}
1505
1506#ifdef CONFIG_SYSCTL
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1508 struct file *file, void __user *buffer,
1509 size_t *length, loff_t *ppos)
1510{
Andi Kleene5ff2152008-07-23 21:27:42 -07001511 struct hstate *h = &default_hstate;
1512 unsigned long tmp;
1513
1514 if (!write)
1515 tmp = h->max_huge_pages;
1516
1517 table->data = &tmp;
1518 table->maxlen = sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
Andi Kleene5ff2152008-07-23 21:27:42 -07001520
1521 if (write)
1522 h->max_huge_pages = set_max_huge_pages(h, tmp);
1523
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 return 0;
1525}
Mel Gorman396faf02007-07-17 04:03:13 -07001526
1527int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1528 struct file *file, void __user *buffer,
1529 size_t *length, loff_t *ppos)
1530{
1531 proc_dointvec(table, write, file, buffer, length, ppos);
1532 if (hugepages_treat_as_movable)
1533 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1534 else
1535 htlb_alloc_mask = GFP_HIGHUSER;
1536 return 0;
1537}
1538
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001539int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1540 struct file *file, void __user *buffer,
1541 size_t *length, loff_t *ppos)
1542{
Andi Kleena5516432008-07-23 21:27:41 -07001543 struct hstate *h = &default_hstate;
Andi Kleene5ff2152008-07-23 21:27:42 -07001544 unsigned long tmp;
1545
1546 if (!write)
1547 tmp = h->nr_overcommit_huge_pages;
1548
1549 table->data = &tmp;
1550 table->maxlen = sizeof(unsigned long);
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001551 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
Andi Kleene5ff2152008-07-23 21:27:42 -07001552
1553 if (write) {
1554 spin_lock(&hugetlb_lock);
1555 h->nr_overcommit_huge_pages = tmp;
1556 spin_unlock(&hugetlb_lock);
1557 }
1558
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001559 return 0;
1560}
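/*
 * Editor's illustration of the sysctl interface these handlers serve
 * (paths assume the usual wiring of these handlers in kernel/sysctl.c).
 * The first and last operate on the default hstate only:
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *	echo 1   > /proc/sys/vm/hugepages_treat_as_movable
 *	echo 32  > /proc/sys/vm/nr_overcommit_hugepages
 */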
1561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562#endif /* CONFIG_SYSCTL */
1563
Alexey Dobriyane1759c22008-10-15 23:50:22 +04001564void hugetlb_report_meminfo(struct seq_file *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
Andi Kleena5516432008-07-23 21:27:41 -07001566 struct hstate *h = &default_hstate;
Alexey Dobriyane1759c22008-10-15 23:50:22 +04001567 seq_printf(m,
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001568 "HugePages_Total: %5lu\n"
1569 "HugePages_Free: %5lu\n"
1570 "HugePages_Rsvd: %5lu\n"
1571 "HugePages_Surp: %5lu\n"
1572 "Hugepagesize: %8lu kB\n",
Andi Kleena5516432008-07-23 21:27:41 -07001573 h->nr_huge_pages,
1574 h->free_huge_pages,
1575 h->resv_huge_pages,
1576 h->surplus_huge_pages,
1577 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578}
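/*
 * Editor's example of the /proc/meminfo block produced above (values are
 * illustrative, default hstate assumed to be 2 MB):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */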
1579
1580int hugetlb_report_node_meminfo(int nid, char *buf)
1581{
Andi Kleena5516432008-07-23 21:27:41 -07001582 struct hstate *h = &default_hstate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return sprintf(buf,
1584 "Node %d HugePages_Total: %5u\n"
Nishanth Aravamudana1de0912008-03-26 14:37:53 -07001585 "Node %d HugePages_Free: %5u\n"
1586 "Node %d HugePages_Surp: %5u\n",
Andi Kleena5516432008-07-23 21:27:41 -07001587 nid, h->nr_huge_pages_node[nid],
1588 nid, h->free_huge_pages_node[nid],
1589 nid, h->surplus_huge_pages_node[nid]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590}
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1593unsigned long hugetlb_total_pages(void)
1594{
Andi Kleena5516432008-07-23 21:27:41 -07001595 struct hstate *h = &default_hstate;
1596 return h->nr_huge_pages * pages_per_huge_page(h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
Andi Kleena5516432008-07-23 21:27:41 -07001599static int hugetlb_acct_memory(struct hstate *h, long delta)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001600{
1601 int ret = -ENOMEM;
1602
1603 spin_lock(&hugetlb_lock);
1604 /*
1605	 * When cpusets are configured, they break the strict hugetlb page
1606	 * reservation as the accounting is done on a global variable. Such a
1607	 * reservation is essentially meaningless in the presence of cpusets,
1608	 * because the reservation is not checked against page availability
1609	 * for the current cpuset. An application can still be OOM-killed by
1610	 * the kernel for lack of free hugetlb pages in the cpuset the task
1611	 * is in. Attempting to enforce strict accounting with cpusets is
1612	 * almost impossible (or too ugly) because cpusets are too fluid:
1613	 * tasks and memory nodes can be moved between cpusets at any time.
1614	 *
1615	 * The change of semantics for shared hugetlb mappings with cpusets
1616	 * is undesirable. However, in order to preserve some of the
1617	 * semantics, we fall back to checking against the current free page
1618	 * availability as a best-effort attempt, hopefully minimizing the
1619	 * impact of the changed semantics that cpusets introduce.
1620 */
1621 if (delta > 0) {
Andi Kleena5516432008-07-23 21:27:41 -07001622 if (gather_surplus_pages(h, delta) < 0)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001623 goto out;
1624
Andi Kleena5516432008-07-23 21:27:41 -07001625 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1626 return_unused_surplus_pages(h, delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001627 goto out;
1628 }
1629 }
1630
1631 ret = 0;
1632 if (delta < 0)
Andi Kleena5516432008-07-23 21:27:41 -07001633 return_unused_surplus_pages(h, (unsigned long) -delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001634
1635out:
1636 spin_unlock(&hugetlb_lock);
1637 return ret;
1638}
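/*
 * Editor's note: positive deltas reach hugetlb_acct_memory() from
 * hugetlb_reserve_pages() when a mapping is set up (delta is the number
 * of pages to reserve); negative deltas come from hugetlb_vm_op_close()
 * and hugetlb_unreserve_pages() when reservations are dropped.  See the
 * callers later in this file.
 */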
1639
Andy Whitcroft84afd992008-07-23 21:27:32 -07001640static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1641{
1642 struct resv_map *reservations = vma_resv_map(vma);
1643
1644 /*
1645	 * This new VMA should share its sibling's reservation map if present.
1646 * The VMA will only ever have a valid reservation map pointer where
1647 * it is being copied for another still existing VMA. As that VMA
1648	 * has a reference to the reservation map it cannot disappear until
1649 * after this open call completes. It is therefore safe to take a
1650 * new reference here without additional locking.
1651 */
1652 if (reservations)
1653 kref_get(&reservations->refs);
1654}
1655
Mel Gormana1e78772008-07-23 21:27:23 -07001656static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1657{
Andi Kleena5516432008-07-23 21:27:41 -07001658 struct hstate *h = hstate_vma(vma);
Andy Whitcroft84afd992008-07-23 21:27:32 -07001659 struct resv_map *reservations = vma_resv_map(vma);
1660 unsigned long reserve;
1661 unsigned long start;
1662 unsigned long end;
1663
1664 if (reservations) {
Andi Kleena5516432008-07-23 21:27:41 -07001665 start = vma_hugecache_offset(h, vma, vma->vm_start);
1666 end = vma_hugecache_offset(h, vma, vma->vm_end);
Andy Whitcroft84afd992008-07-23 21:27:32 -07001667
1668 reserve = (end - start) -
1669 region_count(&reservations->regions, start, end);
1670
1671 kref_put(&reservations->refs, resv_map_release);
1672
Adam Litke7251ff72008-07-23 21:27:59 -07001673 if (reserve) {
Andi Kleena5516432008-07-23 21:27:41 -07001674 hugetlb_acct_memory(h, -reserve);
Adam Litke7251ff72008-07-23 21:27:59 -07001675 hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
1676 }
Andy Whitcroft84afd992008-07-23 21:27:32 -07001677 }
Mel Gormana1e78772008-07-23 21:27:23 -07001678}
1679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680/*
1681 * We cannot handle pagefaults against hugetlb pages at all. They cause
1682 * handle_mm_fault() to try to instantiate regular-sized pages in the
1683 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
1684 * this far.
1685 */
Nick Piggind0217ac2007-07-19 01:47:03 -07001686static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687{
1688 BUG();
Nick Piggind0217ac2007-07-19 01:47:03 -07001689 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690}
1691
1692struct vm_operations_struct hugetlb_vm_ops = {
Nick Piggind0217ac2007-07-19 01:47:03 -07001693 .fault = hugetlb_vm_op_fault,
Andy Whitcroft84afd992008-07-23 21:27:32 -07001694 .open = hugetlb_vm_op_open,
Mel Gormana1e78772008-07-23 21:27:23 -07001695 .close = hugetlb_vm_op_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696};
1697
David Gibson1e8f8892006-01-06 00:10:44 -08001698static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1699 int writable)
David Gibson63551ae2005-06-21 17:14:44 -07001700{
1701 pte_t entry;
1702
David Gibson1e8f8892006-01-06 00:10:44 -08001703 if (writable) {
David Gibson63551ae2005-06-21 17:14:44 -07001704 entry =
1705 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1706 } else {
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001707 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
David Gibson63551ae2005-06-21 17:14:44 -07001708 }
1709 entry = pte_mkyoung(entry);
1710 entry = pte_mkhuge(entry);
1711
1712 return entry;
1713}
1714
David Gibson1e8f8892006-01-06 00:10:44 -08001715static void set_huge_ptep_writable(struct vm_area_struct *vma,
1716 unsigned long address, pte_t *ptep)
1717{
1718 pte_t entry;
1719
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001720 entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
1721 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
Benjamin Herrenschmidt8dab5242007-06-16 10:16:12 -07001722 update_mmu_cache(vma, address, entry);
Benjamin Herrenschmidt8dab5242007-06-16 10:16:12 -07001723 }
David Gibson1e8f8892006-01-06 00:10:44 -08001724}
1725
1726
David Gibson63551ae2005-06-21 17:14:44 -07001727int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1728 struct vm_area_struct *vma)
1729{
1730 pte_t *src_pte, *dst_pte, entry;
1731 struct page *ptepage;
Hugh Dickins1c598272005-10-19 21:23:43 -07001732 unsigned long addr;
David Gibson1e8f8892006-01-06 00:10:44 -08001733 int cow;
Andi Kleena5516432008-07-23 21:27:41 -07001734 struct hstate *h = hstate_vma(vma);
1735 unsigned long sz = huge_page_size(h);
David Gibson1e8f8892006-01-06 00:10:44 -08001736
1737 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
David Gibson63551ae2005-06-21 17:14:44 -07001738
Andi Kleena5516432008-07-23 21:27:41 -07001739 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
Hugh Dickinsc74df322005-10-29 18:16:23 -07001740 src_pte = huge_pte_offset(src, addr);
1741 if (!src_pte)
1742 continue;
Andi Kleena5516432008-07-23 21:27:41 -07001743 dst_pte = huge_pte_alloc(dst, addr, sz);
David Gibson63551ae2005-06-21 17:14:44 -07001744 if (!dst_pte)
1745 goto nomem;
Larry Woodmanc5c99422008-01-24 05:49:25 -08001746
1747 /* If the pagetables are shared don't copy or take references */
1748 if (dst_pte == src_pte)
1749 continue;
1750
Hugh Dickinsc74df322005-10-29 18:16:23 -07001751 spin_lock(&dst->page_table_lock);
Nick Piggin46478752008-06-05 22:45:57 -07001752 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001753 if (!huge_pte_none(huge_ptep_get(src_pte))) {
David Gibson1e8f8892006-01-06 00:10:44 -08001754 if (cow)
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001755 huge_ptep_set_wrprotect(src, addr, src_pte);
1756 entry = huge_ptep_get(src_pte);
Hugh Dickins1c598272005-10-19 21:23:43 -07001757 ptepage = pte_page(entry);
1758 get_page(ptepage);
Hugh Dickins1c598272005-10-19 21:23:43 -07001759 set_huge_pte_at(dst, addr, dst_pte, entry);
1760 }
1761 spin_unlock(&src->page_table_lock);
Hugh Dickinsc74df322005-10-29 18:16:23 -07001762 spin_unlock(&dst->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07001763 }
1764 return 0;
1765
1766nomem:
1767 return -ENOMEM;
1768}
1769
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001770void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001771 unsigned long end, struct page *ref_page)
David Gibson63551ae2005-06-21 17:14:44 -07001772{
1773 struct mm_struct *mm = vma->vm_mm;
1774 unsigned long address;
David Gibsonc7546f82005-08-05 11:59:35 -07001775 pte_t *ptep;
David Gibson63551ae2005-06-21 17:14:44 -07001776 pte_t pte;
1777 struct page *page;
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001778 struct page *tmp;
Andi Kleena5516432008-07-23 21:27:41 -07001779 struct hstate *h = hstate_vma(vma);
1780 unsigned long sz = huge_page_size(h);
1781
Chen, Kenneth Wc0a499c2006-12-06 20:31:39 -08001782 /*
1783	 * A page gathering list, protected by the per-file i_mmap_lock. The
1784 * lock is used to avoid list corruption from multiple unmapping
1785 * of the same page since we are using page->lru.
1786 */
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001787 LIST_HEAD(page_list);
David Gibson63551ae2005-06-21 17:14:44 -07001788
1789 WARN_ON(!is_vm_hugetlb_page(vma));
Andi Kleena5516432008-07-23 21:27:41 -07001790 BUG_ON(start & ~huge_page_mask(h));
1791 BUG_ON(end & ~huge_page_mask(h));
David Gibson63551ae2005-06-21 17:14:44 -07001792
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001793 mmu_notifier_invalidate_range_start(mm, start, end);
Hugh Dickins508034a2005-10-29 18:16:30 -07001794 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07001795 for (address = start; address < end; address += sz) {
David Gibsonc7546f82005-08-05 11:59:35 -07001796 ptep = huge_pte_offset(mm, address);
Adam Litke4c887262005-10-29 18:16:46 -07001797 if (!ptep)
David Gibsonc7546f82005-08-05 11:59:35 -07001798 continue;
1799
Chen, Kenneth W39dde652006-12-06 20:32:03 -08001800 if (huge_pmd_unshare(mm, &address, ptep))
1801 continue;
1802
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001803 /*
1804 * If a reference page is supplied, it is because a specific
1805 * page is being unmapped, not a range. Ensure the page we
1806 * are about to unmap is the actual page of interest.
1807 */
1808 if (ref_page) {
1809 pte = huge_ptep_get(ptep);
1810 if (huge_pte_none(pte))
1811 continue;
1812 page = pte_page(pte);
1813 if (page != ref_page)
1814 continue;
1815
1816 /*
1817 * Mark the VMA as having unmapped its page so that
1818 * future faults in this VMA will fail rather than
1819 * looking like data was lost
1820 */
1821 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1822 }
1823
David Gibsonc7546f82005-08-05 11:59:35 -07001824 pte = huge_ptep_get_and_clear(mm, address, ptep);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001825 if (huge_pte_none(pte))
David Gibson63551ae2005-06-21 17:14:44 -07001826 continue;
David Gibsonc7546f82005-08-05 11:59:35 -07001827
David Gibson63551ae2005-06-21 17:14:44 -07001828 page = pte_page(pte);
Ken Chen6649a382007-02-08 14:20:27 -08001829 if (pte_dirty(pte))
1830 set_page_dirty(page);
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001831 list_add(&page->lru, &page_list);
David Gibson63551ae2005-06-21 17:14:44 -07001832 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 spin_unlock(&mm->page_table_lock);
Hugh Dickins508034a2005-10-29 18:16:30 -07001834 flush_tlb_range(vma, start, end);
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001835 mmu_notifier_invalidate_range_end(mm, start, end);
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001836 list_for_each_entry_safe(page, tmp, &page_list, lru) {
1837 list_del(&page->lru);
1838 put_page(page);
1839 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840}
David Gibson63551ae2005-06-21 17:14:44 -07001841
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001842void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001843 unsigned long end, struct page *ref_page)
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001844{
Andi Kleena137e1c2008-07-23 21:27:43 -07001845 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1846 __unmap_hugepage_range(vma, start, end, ref_page);
1847 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001848}
1849
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001850/*
1851 * This is called when the original mapper is failing to COW a MAP_PRIVATE
1852 * mapping it owns the reserve page for. The intention is to unmap the page
1853 * from other VMAs and let the children be SIGKILLed if they are faulting the
1854 * same region.
1855 */
Harvey Harrison2a4b3de2008-10-18 20:27:06 -07001856static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
1857 struct page *page, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001858{
Adam Litke75266742008-11-12 13:24:56 -08001859 struct hstate *h = hstate_vma(vma);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001860 struct vm_area_struct *iter_vma;
1861 struct address_space *mapping;
1862 struct prio_tree_iter iter;
1863 pgoff_t pgoff;
1864
1865 /*
1866 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
1867 * from page cache lookup which is in HPAGE_SIZE units.
1868 */
Adam Litke75266742008-11-12 13:24:56 -08001869 address = address & huge_page_mask(h);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001870 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1871 + (vma->vm_pgoff >> PAGE_SHIFT);
1872 mapping = (struct address_space *)page_private(page);
1873
1874 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1875 /* Do not unmap the current VMA */
1876 if (iter_vma == vma)
1877 continue;
1878
1879 /*
1880 * Unmap the page from other VMAs without their own reserves.
1881 * They get marked to be SIGKILLed if they fault in these
1882 * areas. This is because a future no-page fault on this VMA
1883 * could insert a zeroed page instead of the data existing
1884 * from the time of fork. This would look like data corruption
1885 */
1886 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1887 unmap_hugepage_range(iter_vma,
Adam Litke75266742008-11-12 13:24:56 -08001888 address, address + huge_page_size(h),
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001889 page);
1890 }
1891
1892 return 1;
1893}
1894
David Gibson1e8f8892006-01-06 00:10:44 -08001895static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001896 unsigned long address, pte_t *ptep, pte_t pte,
1897 struct page *pagecache_page)
David Gibson1e8f8892006-01-06 00:10:44 -08001898{
Andi Kleena5516432008-07-23 21:27:41 -07001899 struct hstate *h = hstate_vma(vma);
David Gibson1e8f8892006-01-06 00:10:44 -08001900 struct page *old_page, *new_page;
David Gibson79ac6ba2006-03-22 00:08:51 -08001901 int avoidcopy;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001902 int outside_reserve = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001903
1904 old_page = pte_page(pte);
1905
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001906retry_avoidcopy:
David Gibson1e8f8892006-01-06 00:10:44 -08001907 /* If no-one else is actually using this page, avoid the copy
1908 * and just make the page writable */
1909 avoidcopy = (page_count(old_page) == 1);
1910 if (avoidcopy) {
1911 set_huge_ptep_writable(vma, address, ptep);
Nick Piggin83c54072007-07-19 01:47:05 -07001912 return 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001913 }
1914
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001915 /*
1916 * If the process that created a MAP_PRIVATE mapping is about to
1917 * perform a COW due to a shared page count, attempt to satisfy
1918 * the allocation without using the existing reserves. The pagecache
1919 * page is used to determine if the reserve at this address was
1920 * consumed or not. If reserves were used, a partial faulted mapping
1921	 * consumed or not. If reserves were used, a partially faulted mapping
1922 * of the full address range.
1923 */
Mel Gormanf83a2752009-05-28 14:34:40 -07001924 if (!(vma->vm_flags & VM_MAYSHARE) &&
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001925 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1926 old_page != pagecache_page)
1927 outside_reserve = 1;
1928
David Gibson1e8f8892006-01-06 00:10:44 -08001929 page_cache_get(old_page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001930 new_page = alloc_huge_page(vma, address, outside_reserve);
David Gibson1e8f8892006-01-06 00:10:44 -08001931
Adam Litke2fc39ce2007-11-14 16:59:39 -08001932 if (IS_ERR(new_page)) {
David Gibson1e8f8892006-01-06 00:10:44 -08001933 page_cache_release(old_page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001934
1935 /*
1936 * If a process owning a MAP_PRIVATE mapping fails to COW,
1937 * it is due to references held by a child and an insufficient
1938		 * huge page pool. To guarantee the original mapper's
1939 * reliability, unmap the page from child processes. The child
1940 * may get SIGKILLed if it later faults.
1941 */
1942 if (outside_reserve) {
1943 BUG_ON(huge_pte_none(pte));
1944 if (unmap_ref_private(mm, vma, old_page, address)) {
1945 BUG_ON(page_count(old_page) != 1);
1946 BUG_ON(huge_pte_none(pte));
1947 goto retry_avoidcopy;
1948 }
1949 WARN_ON_ONCE(1);
1950 }
1951
Adam Litke2fc39ce2007-11-14 16:59:39 -08001952 return -PTR_ERR(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08001953 }
1954
1955 spin_unlock(&mm->page_table_lock);
Atsushi Nemoto9de455b2006-12-12 17:14:55 +00001956 copy_huge_page(new_page, old_page, address, vma);
Nick Piggin0ed361d2008-02-04 22:29:34 -08001957 __SetPageUptodate(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08001958 spin_lock(&mm->page_table_lock);
1959
Andi Kleena5516432008-07-23 21:27:41 -07001960 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001961 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
David Gibson1e8f8892006-01-06 00:10:44 -08001962 /* Break COW */
Gerald Schaefer8fe627e2008-04-28 02:13:28 -07001963 huge_ptep_clear_flush(vma, address, ptep);
David Gibson1e8f8892006-01-06 00:10:44 -08001964 set_huge_pte_at(mm, address, ptep,
1965 make_huge_pte(vma, new_page, 1));
1966 /* Make the old page be freed below */
1967 new_page = old_page;
1968 }
1969 page_cache_release(new_page);
1970 page_cache_release(old_page);
Nick Piggin83c54072007-07-19 01:47:05 -07001971 return 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001972}
1973
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001974/* Return the pagecache page at a given address within a VMA */
Andi Kleena5516432008-07-23 21:27:41 -07001975static struct page *hugetlbfs_pagecache_page(struct hstate *h,
1976 struct vm_area_struct *vma, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001977{
1978 struct address_space *mapping;
Andy Whitcrofte7c4b0b2008-07-23 21:27:26 -07001979 pgoff_t idx;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001980
1981 mapping = vma->vm_file->f_mapping;
Andi Kleena5516432008-07-23 21:27:41 -07001982 idx = vma_hugecache_offset(h, vma, address);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001983
1984 return find_lock_page(mapping, idx);
1985}
1986
Robert P. J. Daya1ed3dd2007-07-17 04:03:33 -07001987static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
Hugh Dickins788c7df2009-06-23 13:49:05 +01001988 unsigned long address, pte_t *ptep, unsigned int flags)
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001989{
Andi Kleena5516432008-07-23 21:27:41 -07001990 struct hstate *h = hstate_vma(vma);
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001991 int ret = VM_FAULT_SIGBUS;
Andy Whitcrofte7c4b0b2008-07-23 21:27:26 -07001992 pgoff_t idx;
Adam Litke4c887262005-10-29 18:16:46 -07001993 unsigned long size;
Adam Litke4c887262005-10-29 18:16:46 -07001994 struct page *page;
1995 struct address_space *mapping;
David Gibson1e8f8892006-01-06 00:10:44 -08001996 pte_t new_pte;
Adam Litke4c887262005-10-29 18:16:46 -07001997
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001998 /*
1999 * Currently, we are forced to kill the process in the event the
2000 * original mapper has unmapped pages from the child due to a failed
2001	 * COW. Warn that such a situation has occurred as it may not be obvious.
2002 */
2003 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2004 printk(KERN_WARNING
2005 "PID %d killed due to inadequate hugepage pool\n",
2006 current->pid);
2007 return ret;
2008 }
2009
Adam Litke4c887262005-10-29 18:16:46 -07002010 mapping = vma->vm_file->f_mapping;
Andi Kleena5516432008-07-23 21:27:41 -07002011 idx = vma_hugecache_offset(h, vma, address);
Adam Litke4c887262005-10-29 18:16:46 -07002012
2013 /*
2014 * Use page lock to guard against racing truncation
2015 * before we get page_table_lock.
2016 */
Christoph Lameter6bda6662006-01-06 00:10:49 -08002017retry:
2018 page = find_lock_page(mapping, idx);
2019 if (!page) {
Andi Kleena5516432008-07-23 21:27:41 -07002020 size = i_size_read(mapping->host) >> huge_page_shift(h);
Hugh Dickinsebed4bf2006-10-28 10:38:43 -07002021 if (idx >= size)
2022 goto out;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07002023 page = alloc_huge_page(vma, address, 0);
Adam Litke2fc39ce2007-11-14 16:59:39 -08002024 if (IS_ERR(page)) {
2025 ret = -PTR_ERR(page);
Christoph Lameter6bda6662006-01-06 00:10:49 -08002026 goto out;
2027 }
Andi Kleena5516432008-07-23 21:27:41 -07002028 clear_huge_page(page, address, huge_page_size(h));
Nick Piggin0ed361d2008-02-04 22:29:34 -08002029 __SetPageUptodate(page);
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002030
Mel Gormanf83a2752009-05-28 14:34:40 -07002031 if (vma->vm_flags & VM_MAYSHARE) {
Christoph Lameter6bda6662006-01-06 00:10:49 -08002032 int err;
Ken Chen45c682a2007-11-14 16:59:44 -08002033 struct inode *inode = mapping->host;
Christoph Lameter6bda6662006-01-06 00:10:49 -08002034
2035 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2036 if (err) {
2037 put_page(page);
Christoph Lameter6bda6662006-01-06 00:10:49 -08002038 if (err == -EEXIST)
2039 goto retry;
2040 goto out;
2041 }
Ken Chen45c682a2007-11-14 16:59:44 -08002042
2043 spin_lock(&inode->i_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002044 inode->i_blocks += blocks_per_huge_page(h);
Ken Chen45c682a2007-11-14 16:59:44 -08002045 spin_unlock(&inode->i_lock);
Christoph Lameter6bda6662006-01-06 00:10:49 -08002046 } else
2047 lock_page(page);
2048 }
David Gibson1e8f8892006-01-06 00:10:44 -08002049
Andy Whitcroft57303d82008-08-12 15:08:47 -07002050 /*
2051 * If we are going to COW a private mapping later, we examine the
2052 * pending reservations for this page now. This will ensure that
2053 * any allocations necessary to record that reservation occur outside
2054 * the spinlock.
2055 */
Hugh Dickins788c7df2009-06-23 13:49:05 +01002056 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
Andy Whitcroft2b267362008-08-12 15:08:49 -07002057 if (vma_needs_reservation(h, vma, address) < 0) {
2058 ret = VM_FAULT_OOM;
2059 goto backout_unlocked;
2060 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07002061
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002062 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002063 size = i_size_read(mapping->host) >> huge_page_shift(h);
Adam Litke4c887262005-10-29 18:16:46 -07002064 if (idx >= size)
2065 goto backout;
2066
Nick Piggin83c54072007-07-19 01:47:05 -07002067 ret = 0;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002068 if (!huge_pte_none(huge_ptep_get(ptep)))
Adam Litke4c887262005-10-29 18:16:46 -07002069 goto backout;
2070
David Gibson1e8f8892006-01-06 00:10:44 -08002071 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2072 && (vma->vm_flags & VM_SHARED)));
2073 set_huge_pte_at(mm, address, ptep, new_pte);
2074
Hugh Dickins788c7df2009-06-23 13:49:05 +01002075 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
David Gibson1e8f8892006-01-06 00:10:44 -08002076 /* Optimization, do the COW without a second fault */
Mel Gorman04f2cbe2008-07-23 21:27:25 -07002077 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
David Gibson1e8f8892006-01-06 00:10:44 -08002078 }
2079
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002080 spin_unlock(&mm->page_table_lock);
Adam Litke4c887262005-10-29 18:16:46 -07002081 unlock_page(page);
2082out:
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002083 return ret;
Adam Litke4c887262005-10-29 18:16:46 -07002084
2085backout:
2086 spin_unlock(&mm->page_table_lock);
Andy Whitcroft2b267362008-08-12 15:08:49 -07002087backout_unlocked:
Adam Litke4c887262005-10-29 18:16:46 -07002088 unlock_page(page);
2089 put_page(page);
2090 goto out;
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002091}
2092
Adam Litke86e52162006-01-06 00:10:43 -08002093int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
Hugh Dickins788c7df2009-06-23 13:49:05 +01002094 unsigned long address, unsigned int flags)
Adam Litke86e52162006-01-06 00:10:43 -08002095{
2096 pte_t *ptep;
2097 pte_t entry;
David Gibson1e8f8892006-01-06 00:10:44 -08002098 int ret;
Andy Whitcroft57303d82008-08-12 15:08:47 -07002099 struct page *pagecache_page = NULL;
David Gibson3935baa2006-03-22 00:08:53 -08002100 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
Andi Kleena5516432008-07-23 21:27:41 -07002101 struct hstate *h = hstate_vma(vma);
Adam Litke86e52162006-01-06 00:10:43 -08002102
Andi Kleena5516432008-07-23 21:27:41 -07002103 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
Adam Litke86e52162006-01-06 00:10:43 -08002104 if (!ptep)
2105 return VM_FAULT_OOM;
2106
David Gibson3935baa2006-03-22 00:08:53 -08002107 /*
2108 * Serialize hugepage allocation and instantiation, so that we don't
2109 * get spurious allocation failures if two CPUs race to instantiate
2110 * the same page in the page cache.
2111 */
2112 mutex_lock(&hugetlb_instantiation_mutex);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002113 entry = huge_ptep_get(ptep);
2114 if (huge_pte_none(entry)) {
Hugh Dickins788c7df2009-06-23 13:49:05 +01002115 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
David Gibsonb4d1d992008-10-15 22:01:11 -07002116 goto out_mutex;
David Gibson3935baa2006-03-22 00:08:53 -08002117 }
Adam Litke86e52162006-01-06 00:10:43 -08002118
Nick Piggin83c54072007-07-19 01:47:05 -07002119 ret = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08002120
Andy Whitcroft57303d82008-08-12 15:08:47 -07002121 /*
2122 * If we are going to COW the mapping later, we examine the pending
2123 * reservations for this page now. This will ensure that any
2124 * allocations necessary to record that reservation occur outside the
2125 * spinlock. For private mappings, we also lookup the pagecache
2126 * page now as it is used to determine if a reservation has been
2127 * consumed.
2128 */
Hugh Dickins788c7df2009-06-23 13:49:05 +01002129 if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
Andy Whitcroft2b267362008-08-12 15:08:49 -07002130 if (vma_needs_reservation(h, vma, address) < 0) {
2131 ret = VM_FAULT_OOM;
David Gibsonb4d1d992008-10-15 22:01:11 -07002132 goto out_mutex;
Andy Whitcroft2b267362008-08-12 15:08:49 -07002133 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07002134
Mel Gormanf83a2752009-05-28 14:34:40 -07002135 if (!(vma->vm_flags & VM_MAYSHARE))
Andy Whitcroft57303d82008-08-12 15:08:47 -07002136 pagecache_page = hugetlbfs_pagecache_page(h,
2137 vma, address);
2138 }
2139
David Gibson1e8f8892006-01-06 00:10:44 -08002140 spin_lock(&mm->page_table_lock);
2141 /* Check for a racing update before calling hugetlb_cow */
David Gibsonb4d1d992008-10-15 22:01:11 -07002142 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2143 goto out_page_table_lock;
2144
2145
Hugh Dickins788c7df2009-06-23 13:49:05 +01002146 if (flags & FAULT_FLAG_WRITE) {
David Gibsonb4d1d992008-10-15 22:01:11 -07002147 if (!pte_write(entry)) {
Andy Whitcroft57303d82008-08-12 15:08:47 -07002148 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2149 pagecache_page);
David Gibsonb4d1d992008-10-15 22:01:11 -07002150 goto out_page_table_lock;
2151 }
2152 entry = pte_mkdirty(entry);
2153 }
2154 entry = pte_mkyoung(entry);
Hugh Dickins788c7df2009-06-23 13:49:05 +01002155 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2156 flags & FAULT_FLAG_WRITE))
David Gibsonb4d1d992008-10-15 22:01:11 -07002157 update_mmu_cache(vma, address, entry);
2158
2159out_page_table_lock:
David Gibson1e8f8892006-01-06 00:10:44 -08002160 spin_unlock(&mm->page_table_lock);
Andy Whitcroft57303d82008-08-12 15:08:47 -07002161
2162 if (pagecache_page) {
2163 unlock_page(pagecache_page);
2164 put_page(pagecache_page);
2165 }
2166
David Gibsonb4d1d992008-10-15 22:01:11 -07002167out_mutex:
David Gibson3935baa2006-03-22 00:08:53 -08002168 mutex_unlock(&hugetlb_instantiation_mutex);
David Gibson1e8f8892006-01-06 00:10:44 -08002169
2170 return ret;
Adam Litke86e52162006-01-06 00:10:43 -08002171}
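/*
 * Editor's summary of the lock nesting in the fault path above:
 * hugetlb_instantiation_mutex is taken first, then mm->page_table_lock.
 * hugetlb_no_page() runs with only the mutex held (it takes the
 * page_table_lock itself), while hugetlb_cow() drops page_table_lock
 * around copy_huge_page() and revalidates the pte with pte_same() before
 * installing the new page.
 */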
2172
Andi Kleenceb86872008-07-23 21:27:50 -07002173/* Can be overridden by architectures */
2174__attribute__((weak)) struct page *
2175follow_huge_pud(struct mm_struct *mm, unsigned long address,
2176 pud_t *pud, int write)
2177{
2178 BUG();
2179 return NULL;
2180}
2181
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002182static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
2183{
2184 if (!ptep || write || shared)
2185 return 0;
2186 else
2187 return huge_pte_none(huge_ptep_get(ptep));
2188}
2189
David Gibson63551ae2005-06-21 17:14:44 -07002190int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2191 struct page **pages, struct vm_area_struct **vmas,
Adam Litke5b23dbe2007-11-14 16:59:33 -08002192 unsigned long *position, int *length, int i,
2193 int write)
David Gibson63551ae2005-06-21 17:14:44 -07002194{
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002195 unsigned long pfn_offset;
2196 unsigned long vaddr = *position;
David Gibson63551ae2005-06-21 17:14:44 -07002197 int remainder = *length;
Andi Kleena5516432008-07-23 21:27:41 -07002198 struct hstate *h = hstate_vma(vma);
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002199 int zeropage_ok = 0;
2200 int shared = vma->vm_flags & VM_SHARED;
David Gibson63551ae2005-06-21 17:14:44 -07002201
Hugh Dickins1c598272005-10-19 21:23:43 -07002202 spin_lock(&mm->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07002203 while (vaddr < vma->vm_end && remainder) {
Adam Litke4c887262005-10-29 18:16:46 -07002204 pte_t *pte;
2205 struct page *page;
2206
2207 /*
2208		 * Some archs (sparc64, sh*) have multiple pte_ts for
2209		 * each hugepage. We have to make sure we get the
2210		 * first, for the page indexing below to work.
2211 */
Andi Kleena5516432008-07-23 21:27:41 -07002212 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002213 if (huge_zeropage_ok(pte, write, shared))
2214 zeropage_ok = 1;
Adam Litke4c887262005-10-29 18:16:46 -07002215
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002216 if (!pte ||
2217 (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002218 (write && !pte_write(huge_ptep_get(pte)))) {
Adam Litke4c887262005-10-29 18:16:46 -07002219 int ret;
2220
2221 spin_unlock(&mm->page_table_lock);
Adam Litke5b23dbe2007-11-14 16:59:33 -08002222 ret = hugetlb_fault(mm, vma, vaddr, write);
Adam Litke4c887262005-10-29 18:16:46 -07002223 spin_lock(&mm->page_table_lock);
Adam Litkea89182c2007-08-22 14:01:51 -07002224 if (!(ret & VM_FAULT_ERROR))
Adam Litke4c887262005-10-29 18:16:46 -07002225 continue;
2226
2227 remainder = 0;
2228 if (!i)
2229 i = -EFAULT;
2230 break;
2231 }
David Gibson63551ae2005-06-21 17:14:44 -07002232
Andi Kleena5516432008-07-23 21:27:41 -07002233 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002234 page = pte_page(huge_ptep_get(pte));
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002235same_page:
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08002236 if (pages) {
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002237 if (zeropage_ok)
2238 pages[i] = ZERO_PAGE(0);
2239 else
Andy Whitcroft69d177c2008-11-06 12:53:26 -08002240 pages[i] = mem_map_offset(page, pfn_offset);
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002241 get_page(pages[i]);
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08002242 }
David Gibson63551ae2005-06-21 17:14:44 -07002243
2244 if (vmas)
2245 vmas[i] = vma;
2246
2247 vaddr += PAGE_SIZE;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002248 ++pfn_offset;
David Gibson63551ae2005-06-21 17:14:44 -07002249 --remainder;
2250 ++i;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002251 if (vaddr < vma->vm_end && remainder &&
Andi Kleena5516432008-07-23 21:27:41 -07002252 pfn_offset < pages_per_huge_page(h)) {
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002253 /*
2254 * We use pfn_offset to avoid touching the pageframes
2255 * of this compound page.
2256 */
2257 goto same_page;
2258 }
David Gibson63551ae2005-06-21 17:14:44 -07002259 }
Hugh Dickins1c598272005-10-19 21:23:43 -07002260 spin_unlock(&mm->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07002261 *length = remainder;
2262 *position = vaddr;
2263
2264 return i;
2265}
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002266
2267void hugetlb_change_protection(struct vm_area_struct *vma,
2268 unsigned long address, unsigned long end, pgprot_t newprot)
2269{
2270 struct mm_struct *mm = vma->vm_mm;
2271 unsigned long start = address;
2272 pte_t *ptep;
2273 pte_t pte;
Andi Kleena5516432008-07-23 21:27:41 -07002274 struct hstate *h = hstate_vma(vma);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002275
2276 BUG_ON(address >= end);
2277 flush_cache_range(vma, address, end);
2278
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002279 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002280 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002281 for (; address < end; address += huge_page_size(h)) {
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002282 ptep = huge_pte_offset(mm, address);
2283 if (!ptep)
2284 continue;
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002285 if (huge_pmd_unshare(mm, &address, ptep))
2286 continue;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002287 if (!huge_pte_none(huge_ptep_get(ptep))) {
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002288 pte = huge_ptep_get_and_clear(mm, address, ptep);
2289 pte = pte_mkhuge(pte_modify(pte, newprot));
2290 set_huge_pte_at(mm, address, ptep, pte);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002291 }
2292 }
2293 spin_unlock(&mm->page_table_lock);
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002294 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002295
2296 flush_tlb_range(vma, start, end);
2297}
2298
Mel Gormana1e78772008-07-23 21:27:23 -07002299int hugetlb_reserve_pages(struct inode *inode,
2300 long from, long to,
Mel Gorman5a6fe122009-02-10 14:02:27 +00002301 struct vm_area_struct *vma,
2302 int acctflag)
Adam Litkee4e574b2007-10-16 01:26:19 -07002303{
Mel Gorman17c9d122009-02-11 16:34:16 +00002304 long ret, chg;
Andi Kleena5516432008-07-23 21:27:41 -07002305 struct hstate *h = hstate_inode(inode);
Adam Litkee4e574b2007-10-16 01:26:19 -07002306
Mel Gormana1e78772008-07-23 21:27:23 -07002307 /*
Mel Gorman17c9d122009-02-11 16:34:16 +00002308 * Only apply hugepage reservation if asked. At fault time, an
2309 * attempt will be made for VM_NORESERVE to allocate a page
2310 * and filesystem quota without using reserves
2311 */
2312 if (acctflag & VM_NORESERVE)
2313 return 0;
2314
2315 /*
Mel Gormana1e78772008-07-23 21:27:23 -07002316 * Shared mappings base their reservation on the number of pages that
2317 * are already allocated on behalf of the file. Private mappings need
2318 * to reserve the full area even if read-only as mprotect() may be
2319 * called to make the mapping read-write. Assume !vma is a shm mapping
2320 */
Mel Gormanf83a2752009-05-28 14:34:40 -07002321 if (!vma || vma->vm_flags & VM_MAYSHARE)
Mel Gormana1e78772008-07-23 21:27:23 -07002322 chg = region_chg(&inode->i_mapping->private_list, from, to);
Mel Gorman5a6fe122009-02-10 14:02:27 +00002323 else {
2324 struct resv_map *resv_map = resv_map_alloc();
Mel Gorman5a6fe122009-02-10 14:02:27 +00002325 if (!resv_map)
2326 return -ENOMEM;
2327
Mel Gorman17c9d122009-02-11 16:34:16 +00002328 chg = to - from;
2329
Mel Gorman5a6fe122009-02-10 14:02:27 +00002330 set_vma_resv_map(vma, resv_map);
2331 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2332 }
2333
Mel Gorman17c9d122009-02-11 16:34:16 +00002334 if (chg < 0)
2335 return chg;
2336
2337 /* There must be enough filesystem quota for the mapping */
2338 if (hugetlb_get_quota(inode->i_mapping, chg))
2339 return -ENOSPC;
2340
2341 /*
2342 * Check enough hugepages are available for the reservation.
2343 * Hand back the quota if there are not
2344 */
2345 ret = hugetlb_acct_memory(h, chg);
2346 if (ret < 0) {
2347 hugetlb_put_quota(inode->i_mapping, chg);
2348 return ret;
2349 }
2350
2351 /*
2352 * Account for the reservations made. Shared mappings record regions
2353 * that have reservations as they are shared by multiple VMAs.
2354 * When the last VMA disappears, the region map says how much
2355 * the reservation was and the page cache tells how much of
2356 * the reservation was consumed. Private mappings are per-VMA and
2357 * only the consumed reservations are tracked. When the VMA
2358 * disappears, the original reservation is the VMA size and the
2359 * consumed reservations are stored in the map. Hence, nothing
2360 * else has to be done for private mappings here
2361 */
Mel Gormanf83a2752009-05-28 14:34:40 -07002362 if (!vma || vma->vm_flags & VM_MAYSHARE)
Mel Gorman17c9d122009-02-11 16:34:16 +00002363 region_add(&inode->i_mapping->private_list, from, to);
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002364 return 0;
2365}
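/*
 * Editor's sketch of a typical consumer of the reservation code above
 * (user-space fragment, purely illustrative; "/mnt/huge" is an assumed
 * hugetlbfs mount and hpage_sz the huge page size):
 *
 *	fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 8 * hpage_sz);
 *	p = mmap(NULL, 8 * hpage_sz, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED, fd, 0);
 *
 * For the MAP_SHARED case, chg counts only the pages in [from, to) not
 * already present in the inode's private_list and the region is recorded
 * there; for MAP_PRIVATE, chg is simply to - from and the resv_map
 * allocated here tracks consumption on a per-VMA basis.
 */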
2366
2367void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2368{
Andi Kleena5516432008-07-23 21:27:41 -07002369 struct hstate *h = hstate_inode(inode);
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002370 long chg = region_truncate(&inode->i_mapping->private_list, offset);
Ken Chen45c682a2007-11-14 16:59:44 -08002371
2372 spin_lock(&inode->i_lock);
Eric Sandeene4c6f8b2009-07-29 15:02:16 -07002373 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
Ken Chen45c682a2007-11-14 16:59:44 -08002374 spin_unlock(&inode->i_lock);
2375
Adam Litke90d8b7e2007-11-14 16:59:42 -08002376 hugetlb_put_quota(inode->i_mapping, (chg - freed));
Andi Kleena5516432008-07-23 21:27:41 -07002377 hugetlb_acct_memory(h, -(chg - freed));
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002378}