/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
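
/*
 * Illustrative example (not in the original source): with existing
 * regions [1,3) and [5,7), region_add(head, 2, 6) rounds f down to 1,
 * absorbs [5,7) (extending t to 7), frees the absorbed entry and
 * leaves the single region [1,7).
 */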

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position, but make it zero
	 * size so that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
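
/*
 * Illustrative example (not in the original source): with an existing
 * region [1,3), region_chg(head, 2, 6) returns 3 -- offsets 3, 4 and 5
 * are the only pages in the requested range not already covered by a
 * reservation.  The list itself is not modified; a later region_add()
 * with the same arguments commits the change.
 */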

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		/* long, not int, so large offsets are not truncated */
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
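
/*
 * Illustrative example (not in the original source): for a 2MB hstate
 * (huge_page_shift(h) == 21, huge_page_order(h) == 9 with 4K base
 * pages), a fault 4MB past vm_start yields index vm_pgoff/512 + 2;
 * offsets are counted in whole huge pages, not base pages.
 */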

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	/* sz is in bytes; compare page counts, as copy_huge_page() does */
	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES))
		return clear_gigantic_page(page, addr, sz);

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_gigantic_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;
	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
		return copy_gigantic_page(dst, src, addr, vma);

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct hstate *h)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !h->surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid])
			continue;

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * Use a helper variable to find the next node and then
 * copy it back to hugetlb_next_nid afterwards:
 * otherwise there's a window in which a racer might
 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
 * But we don't need to use a spin_lock here: it really
 * doesn't matter if occasionally a racer chooses the
 * same nid as we do.  Move nid forward in the mask even
 * if we just successfully allocated a hugepage so that
 * the next caller gets hugepages on the next node.
 */
static int hstate_next_node(struct hstate *h)
{
	int next_nid;
	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
	if (next_nid == MAX_NUMNODES)
		next_nid = first_node(node_online_map);
	h->hugetlb_next_nid = next_nid;
	return next_nid;
}

static int alloc_fresh_huge_page(struct hstate *h)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = h->hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
		if (page)
			ret = 1;
		next_nid = hstate_next_node(h);
	} while (!page && h->hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		/* fall through to the failure path below so the
		 * optimistically incremented counters are undone */
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!h->surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			h->surplus_huge_pages--;
			h->surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase quota before an allocation can
 * occur.  Where any new reservation would be required the reservation
 * change is prepared, but not committed.  Once the page has been
 * quota'd, allocated and instantiated, the change should be committed
 * via vma_commit_reservation().  No action is required on failure.
 */
static int vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		int err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
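
/*
 * Usage sketch (illustrative, not in the original source): callers such
 * as alloc_huge_page() below first call vma_needs_reservation() to see
 * how many pages the fault will consume beyond the existing reserves,
 * charge quota for that many, and only once the page is safely
 * allocated call vma_commit_reservation() to record the offset in the
 * region map.
 */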

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;	/* must be signed: the chg < 0 error check below
			 * never fires if chg is unsigned */

	/*
	 * Processes that did not create the mapping will have no reserves
	 * and will not have accounted against quota. Check that the quota
	 * can be made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_online_map);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(h->hugetlb_next_nid),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			if (m)
				goto found;
		}
		hstate_next_node(h);
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}
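
/*
 * Illustrative example (not in the original source):
 * memfmt(buf, 2UL << 20) formats as "2 MB" and memfmt(buf, 1UL << 30)
 * as "1 GB"; the caller supplies a buffer large enough for the longest
 * formatted string (see the char buf[32] in report_hugepages() below).
 */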

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

#define persistent_huge_pages(h) ((h)->nr_huge_pages - (h)->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count);
	while (min_count < persistent_huge_pages(h)) {
		struct page *page = dequeue_huge_page(h);
		if (!page)
			break;
		update_and_free_page(h, page);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
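
/*
 * Illustrative example (not in the original source): with 10 persistent
 * pages and 2 surplus pages, set_max_huge_pages(h, 12) converts both
 * surplus pages to persistent ones and allocates nothing new; shrinking
 * back to 8 frees unreserved free pages first and pushes any remainder
 * into surplus state to be freed as they become unused.
 */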

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
	int i;
	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj)
			return &hstates[i];
	BUG();
	return NULL;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_huge_pages);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;	/* a 0 return would make write(2) spin */

	h->max_huge_pages = set_max_huge_pages(h, input);

	return count;
}
HSTATE_ATTR(nr_hugepages);

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;	/* a 0 return would make write(2) spin */

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
{
	int retval;

	hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
							hugepages_kobj);
	if (!hstate_kobjs[h - hstates])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[h - hstates],
							&hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[h - hstates]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
								h->name);
	}
}

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);
1324
1325static int __init hugetlb_init(void)
1326{
Benjamin Herrenschmidt0ef89d22008-07-31 00:07:30 -07001327 /* Some platform decide whether they support huge pages at boot
1328 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1329 * there is no such support
1330 */
1331 if (HPAGE_SHIFT == 0)
1332 return 0;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001333
Nick Piggine11bfbf2008-07-23 21:27:52 -07001334 if (!size_to_hstate(default_hstate_size)) {
1335 default_hstate_size = HPAGE_SIZE;
1336 if (!size_to_hstate(default_hstate_size))
1337 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001338 }
Nick Piggine11bfbf2008-07-23 21:27:52 -07001339 default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1340 if (default_hstate_max_huge_pages)
1341 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001342
1343 hugetlb_init_hstates();
1344
Andi Kleenaa888a72008-07-23 21:27:47 -07001345 gather_bootmem_prealloc();
1346
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001347 report_hugepages();
1348
1349 hugetlb_sysfs_init();
1350
1351 return 0;
1352}
1353module_init(hugetlb_init);
1354
1355/* Should be called when processing a hugepagesz=... option */
1356void __init hugetlb_add_hstate(unsigned order)
1357{
1358 struct hstate *h;
Andi Kleen8faa8b02008-07-23 21:27:48 -07001359 unsigned long i;
1360
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001361 if (size_to_hstate(PAGE_SIZE << order)) {
1362 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1363 return;
1364 }
1365 BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1366 BUG_ON(order == 0);
1367 h = &hstates[max_hstate++];
1368 h->order = order;
1369 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
Andi Kleen8faa8b02008-07-23 21:27:48 -07001370 h->nr_huge_pages = 0;
1371 h->free_huge_pages = 0;
1372 for (i = 0; i < MAX_NUMNODES; ++i)
1373 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1374 h->hugetlb_next_nid = first_node(node_online_map);
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001375 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1376 huge_page_size(h)/1024);
Andi Kleen8faa8b02008-07-23 21:27:48 -07001377
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001378 parsed_hstate = h;
1379}
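
/*
 * Worked example: with 4 KB base pages, "hugepagesz=2M" yields order 9
 * (2^9 * 4 KB = 2 MB), so h->mask becomes ~0x1fffff and h->name becomes
 * "hugepages-2048kB". A 1 GB page on the same architecture would be
 * order 18.
 */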
1380
Nick Piggine11bfbf2008-07-23 21:27:52 -07001381static int __init hugetlb_nrpages_setup(char *s)
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001382{
1383 unsigned long *mhp;
Andi Kleen8faa8b02008-07-23 21:27:48 -07001384 static unsigned long *last_mhp;
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001385
1386 /*
1387 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1388 * so this hugepages= parameter goes to the "default hstate".
1389 */
1390 if (!max_hstate)
1391 mhp = &default_hstate_max_huge_pages;
1392 else
1393 mhp = &parsed_hstate->max_huge_pages;
1394
Andi Kleen8faa8b02008-07-23 21:27:48 -07001395 if (mhp == last_mhp) {
1396 printk(KERN_WARNING "hugepages= specified twice without "
1397 "interleaving hugepagesz=, ignoring\n");
1398 return 1;
1399 }
1400
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001401 if (sscanf(s, "%lu", mhp) <= 0)
1402 *mhp = 0;
1403
Andi Kleen8faa8b02008-07-23 21:27:48 -07001404 /*
1405 * Global state is always initialized later in hugetlb_init.
1406	 * But pages for hstates of order >= MAX_ORDER must be allocated
1407	 * here, early, while the bootmem allocator is still usable.
1408 */
1409 if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1410 hugetlb_hstate_alloc_pages(parsed_hstate);
1411
1412 last_mhp = mhp;
1413
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001414 return 1;
1415}
Nick Piggine11bfbf2008-07-23 21:27:52 -07001416__setup("hugepages=", hugetlb_nrpages_setup);
1417
1418static int __init hugetlb_default_setup(char *s)
1419{
1420 default_hstate_size = memparse(s, &s);
1421 return 1;
1422}
1423__setup("default_hugepagesz=", hugetlb_default_setup);
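
/*
 * Example command line combining the handlers above (sizes shown assume
 * an architecture supporting both 2 MB and 1 GB pages):
 *
 *   hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512 default_hugepagesz=1G
 *
 * Each hugepagesz= selects the hstate that the following hugepages=
 * count applies to, and default_hugepagesz= chooses the hstate that
 * backs default_hstate, used by the legacy sysctl handlers below.
 */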
Nishanth Aravamudana3437872008-07-23 21:27:44 -07001424
Nishanth Aravamudan8a213462008-07-25 19:44:37 -07001425static unsigned int cpuset_mems_nr(unsigned int *array)
1426{
1427 int node;
1428 unsigned int nr = 0;
1429
1430 for_each_node_mask(node, cpuset_current_mems_allowed)
1431 nr += array[node];
1432
1433 return nr;
1434}
1435
1436#ifdef CONFIG_SYSCTL
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1438 struct file *file, void __user *buffer,
1439 size_t *length, loff_t *ppos)
1440{
Andi Kleene5ff2152008-07-23 21:27:42 -07001441 struct hstate *h = &default_hstate;
1442 unsigned long tmp;
1443
1444 if (!write)
1445 tmp = h->max_huge_pages;
1446
1447 table->data = &tmp;
1448 table->maxlen = sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
Andi Kleene5ff2152008-07-23 21:27:42 -07001450
1451 if (write)
1452 h->max_huge_pages = set_max_huge_pages(h, tmp);
1453
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 return 0;
1455}
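
/*
 * This is the handler behind the vm.nr_hugepages sysctl, so e.g.
 * "echo 128 > /proc/sys/vm/nr_hugepages" resizes the default hstate's
 * persistent pool through set_max_huge_pages().
 */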
Mel Gorman396faf02007-07-17 04:03:13 -07001456
1457int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1458 struct file *file, void __user *buffer,
1459 size_t *length, loff_t *ppos)
1460{
1461 proc_dointvec(table, write, file, buffer, length, ppos);
1462 if (hugepages_treat_as_movable)
1463 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1464 else
1465 htlb_alloc_mask = GFP_HIGHUSER;
1466 return 0;
1467}
1468
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001469int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1470 struct file *file, void __user *buffer,
1471 size_t *length, loff_t *ppos)
1472{
Andi Kleena5516432008-07-23 21:27:41 -07001473 struct hstate *h = &default_hstate;
Andi Kleene5ff2152008-07-23 21:27:42 -07001474 unsigned long tmp;
1475
1476 if (!write)
1477 tmp = h->nr_overcommit_huge_pages;
1478
1479 table->data = &tmp;
1480 table->maxlen = sizeof(unsigned long);
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001481 proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
Andi Kleene5ff2152008-07-23 21:27:42 -07001482
1483 if (write) {
1484 spin_lock(&hugetlb_lock);
1485 h->nr_overcommit_huge_pages = tmp;
1486 spin_unlock(&hugetlb_lock);
1487 }
1488
Nishanth Aravamudana3d0c6a2008-02-08 04:18:18 -08001489 return 0;
1490}
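
/*
 * Likewise, this backs vm.nr_overcommit_hugepages: e.g.
 * "echo 32 > /proc/sys/vm/nr_overcommit_hugepages" permits up to 32
 * surplus huge pages for the default hstate.
 */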
1491
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492#endif /* CONFIG_SYSCTL */
1493
Alexey Dobriyane1759c22008-10-15 23:50:22 +04001494void hugetlb_report_meminfo(struct seq_file *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495{
Andi Kleena5516432008-07-23 21:27:41 -07001496 struct hstate *h = &default_hstate;
Alexey Dobriyane1759c22008-10-15 23:50:22 +04001497 seq_printf(m,
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001498 "HugePages_Total: %5lu\n"
1499 "HugePages_Free: %5lu\n"
1500 "HugePages_Rsvd: %5lu\n"
1501 "HugePages_Surp: %5lu\n"
1502 "Hugepagesize: %8lu kB\n",
Andi Kleena5516432008-07-23 21:27:41 -07001503 h->nr_huge_pages,
1504 h->free_huge_pages,
1505 h->resv_huge_pages,
1506 h->surplus_huge_pages,
1507 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
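
/*
 * With the format above, the /proc/meminfo section reads (values
 * illustrative only):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       32
 *   HugePages_Rsvd:       16
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */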
1509
1510int hugetlb_report_node_meminfo(int nid, char *buf)
1511{
Andi Kleena5516432008-07-23 21:27:41 -07001512 struct hstate *h = &default_hstate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 return sprintf(buf,
1514 "Node %d HugePages_Total: %5u\n"
Nishanth Aravamudana1de0912008-03-26 14:37:53 -07001515 "Node %d HugePages_Free: %5u\n"
1516 "Node %d HugePages_Surp: %5u\n",
Andi Kleena5516432008-07-23 21:27:41 -07001517 nid, h->nr_huge_pages_node[nid],
1518 nid, h->free_huge_pages_node[nid],
1519 nid, h->surplus_huge_pages_node[nid]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520}
1521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1523unsigned long hugetlb_total_pages(void)
1524{
Andi Kleena5516432008-07-23 21:27:41 -07001525 struct hstate *h = &default_hstate;
1526 return h->nr_huge_pages * pages_per_huge_page(h);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
Andi Kleena5516432008-07-23 21:27:41 -07001529static int hugetlb_acct_memory(struct hstate *h, long delta)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001530{
1531 int ret = -ENOMEM;
1532
1533 spin_lock(&hugetlb_lock);
1534 /*
1535	 * When cpusets are configured, they break the strict hugetlb page
1536	 * reservation because the accounting is done on a global variable.
1537	 * Such a reservation is meaningless in the presence of cpusets, as
1538	 * it is not checked against page availability for the task's
1539	 * current cpuset. The kernel can therefore still OOM-kill an
1540	 * application for lack of free huge pages in its cpuset. Enforcing
1541	 * strict accounting with cpusets is nearly impossible (or too ugly)
1542	 * because cpusets are too fluid: tasks and memory nodes can be
1543	 * moved between cpusets dynamically.
1544	 *
1545	 * Changing the semantics of shared hugetlb mappings under cpusets
1546	 * is undesirable. However, to preserve some of those semantics, we
1547	 * fall back to checking against the current free page availability
1548	 * as a best-effort attempt, hopefully minimizing the impact of the
1549	 * semantic change that cpusets introduce.
1550 */
1551 if (delta > 0) {
Andi Kleena5516432008-07-23 21:27:41 -07001552 if (gather_surplus_pages(h, delta) < 0)
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001553 goto out;
1554
Andi Kleena5516432008-07-23 21:27:41 -07001555 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1556 return_unused_surplus_pages(h, delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001557 goto out;
1558 }
1559 }
1560
1561 ret = 0;
1562 if (delta < 0)
Andi Kleena5516432008-07-23 21:27:41 -07001563 return_unused_surplus_pages(h, (unsigned long) -delta);
Mel Gormanfc1b8a72008-07-23 21:27:22 -07001564
1565out:
1566 spin_unlock(&hugetlb_lock);
1567 return ret;
1568}
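
/*
 * Sign convention: callers pass a positive delta to take a reservation
 * (hugetlb_reserve_pages() below) and a negative delta to drop one
 * (hugetlb_vm_op_close() and hugetlb_unreserve_pages()). A positive
 * delta may be satisfied from surplus pages via gather_surplus_pages(),
 * subject to the best-effort cpuset check above.
 */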
1569
Andy Whitcroft84afd992008-07-23 21:27:32 -07001570static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1571{
1572 struct resv_map *reservations = vma_resv_map(vma);
1573
1574 /*
1575	 * This new VMA should share its sibling's reservation map if present.
1576	 * The VMA will only ever have a valid reservation map pointer where
1577	 * it is being copied for another still existing VMA. As that VMA
1578	 * has a reference to the reservation map it cannot disappear until
1579 * after this open call completes. It is therefore safe to take a
1580 * new reference here without additional locking.
1581 */
1582 if (reservations)
1583 kref_get(&reservations->refs);
1584}
1585
Mel Gormana1e78772008-07-23 21:27:23 -07001586static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1587{
Andi Kleena5516432008-07-23 21:27:41 -07001588 struct hstate *h = hstate_vma(vma);
Andy Whitcroft84afd992008-07-23 21:27:32 -07001589 struct resv_map *reservations = vma_resv_map(vma);
1590 unsigned long reserve;
1591 unsigned long start;
1592 unsigned long end;
1593
1594 if (reservations) {
Andi Kleena5516432008-07-23 21:27:41 -07001595 start = vma_hugecache_offset(h, vma, vma->vm_start);
1596 end = vma_hugecache_offset(h, vma, vma->vm_end);
Andy Whitcroft84afd992008-07-23 21:27:32 -07001597
1598 reserve = (end - start) -
1599 region_count(&reservations->regions, start, end);
1600
1601 kref_put(&reservations->refs, resv_map_release);
1602
Adam Litke7251ff782008-07-23 21:27:59 -07001603 if (reserve) {
Andi Kleena5516432008-07-23 21:27:41 -07001604 hugetlb_acct_memory(h, -reserve);
Adam Litke7251ff782008-07-23 21:27:59 -07001605 hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
1606 }
Andy Whitcroft84afd992008-07-23 21:27:32 -07001607 }
Mel Gormana1e78772008-07-23 21:27:23 -07001608}
1609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610/*
1611 * We cannot handle pagefaults against hugetlb pages at all. They cause
1612 * handle_mm_fault() to try to instantiate regular-sized pages in the
1613 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
1614 * this far.
1615 */
Nick Piggind0217ac2007-07-19 01:47:03 -07001616static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617{
1618 BUG();
Nick Piggind0217ac2007-07-19 01:47:03 -07001619 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620}
1621
1622struct vm_operations_struct hugetlb_vm_ops = {
Nick Piggind0217ac2007-07-19 01:47:03 -07001623 .fault = hugetlb_vm_op_fault,
Andy Whitcroft84afd992008-07-23 21:27:32 -07001624 .open = hugetlb_vm_op_open,
Mel Gormana1e78772008-07-23 21:27:23 -07001625 .close = hugetlb_vm_op_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626};
1627
David Gibson1e8f8892006-01-06 00:10:44 -08001628static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1629 int writable)
David Gibson63551ae2005-06-21 17:14:44 -07001630{
1631 pte_t entry;
1632
David Gibson1e8f8892006-01-06 00:10:44 -08001633 if (writable) {
David Gibson63551ae2005-06-21 17:14:44 -07001634 entry =
1635 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1636 } else {
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001637 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
David Gibson63551ae2005-06-21 17:14:44 -07001638 }
1639 entry = pte_mkyoung(entry);
1640 entry = pte_mkhuge(entry);
1641
1642 return entry;
1643}
1644
David Gibson1e8f8892006-01-06 00:10:44 -08001645static void set_huge_ptep_writable(struct vm_area_struct *vma,
1646 unsigned long address, pte_t *ptep)
1647{
1648 pte_t entry;
1649
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001650 entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
1651 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
Benjamin Herrenschmidt8dab5242007-06-16 10:16:12 -07001652 update_mmu_cache(vma, address, entry);
Benjamin Herrenschmidt8dab5242007-06-16 10:16:12 -07001653 }
David Gibson1e8f8892006-01-06 00:10:44 -08001654}
1655
1656
David Gibson63551ae2005-06-21 17:14:44 -07001657int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1658 struct vm_area_struct *vma)
1659{
1660 pte_t *src_pte, *dst_pte, entry;
1661 struct page *ptepage;
Hugh Dickins1c598272005-10-19 21:23:43 -07001662 unsigned long addr;
David Gibson1e8f8892006-01-06 00:10:44 -08001663 int cow;
Andi Kleena5516432008-07-23 21:27:41 -07001664 struct hstate *h = hstate_vma(vma);
1665 unsigned long sz = huge_page_size(h);
David Gibson1e8f8892006-01-06 00:10:44 -08001666
1667 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
David Gibson63551ae2005-06-21 17:14:44 -07001668
Andi Kleena5516432008-07-23 21:27:41 -07001669 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
Hugh Dickinsc74df322005-10-29 18:16:23 -07001670 src_pte = huge_pte_offset(src, addr);
1671 if (!src_pte)
1672 continue;
Andi Kleena5516432008-07-23 21:27:41 -07001673 dst_pte = huge_pte_alloc(dst, addr, sz);
David Gibson63551ae2005-06-21 17:14:44 -07001674 if (!dst_pte)
1675 goto nomem;
Larry Woodmanc5c99422008-01-24 05:49:25 -08001676
1677 /* If the pagetables are shared don't copy or take references */
1678 if (dst_pte == src_pte)
1679 continue;
1680
Hugh Dickinsc74df322005-10-29 18:16:23 -07001681 spin_lock(&dst->page_table_lock);
Nick Piggin46478752008-06-05 22:45:57 -07001682 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001683 if (!huge_pte_none(huge_ptep_get(src_pte))) {
David Gibson1e8f8892006-01-06 00:10:44 -08001684 if (cow)
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001685 huge_ptep_set_wrprotect(src, addr, src_pte);
1686 entry = huge_ptep_get(src_pte);
Hugh Dickins1c598272005-10-19 21:23:43 -07001687 ptepage = pte_page(entry);
1688 get_page(ptepage);
Hugh Dickins1c598272005-10-19 21:23:43 -07001689 set_huge_pte_at(dst, addr, dst_pte, entry);
1690 }
1691 spin_unlock(&src->page_table_lock);
Hugh Dickinsc74df322005-10-29 18:16:23 -07001692 spin_unlock(&dst->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07001693 }
1694 return 0;
1695
1696nomem:
1697 return -ENOMEM;
1698}
1699
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001700void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001701 unsigned long end, struct page *ref_page)
David Gibson63551ae2005-06-21 17:14:44 -07001702{
1703 struct mm_struct *mm = vma->vm_mm;
1704 unsigned long address;
David Gibsonc7546f82005-08-05 11:59:35 -07001705 pte_t *ptep;
David Gibson63551ae2005-06-21 17:14:44 -07001706 pte_t pte;
1707 struct page *page;
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001708 struct page *tmp;
Andi Kleena5516432008-07-23 21:27:41 -07001709 struct hstate *h = hstate_vma(vma);
1710 unsigned long sz = huge_page_size(h);
1711
Chen, Kenneth Wc0a499c2006-12-06 20:31:39 -08001712 /*
1713	 * A page gathering list, protected by the per-file i_mmap_lock. The
1714 * lock is used to avoid list corruption from multiple unmapping
1715 * of the same page since we are using page->lru.
1716 */
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001717 LIST_HEAD(page_list);
David Gibson63551ae2005-06-21 17:14:44 -07001718
1719 WARN_ON(!is_vm_hugetlb_page(vma));
Andi Kleena5516432008-07-23 21:27:41 -07001720 BUG_ON(start & ~huge_page_mask(h));
1721 BUG_ON(end & ~huge_page_mask(h));
David Gibson63551ae2005-06-21 17:14:44 -07001722
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001723 mmu_notifier_invalidate_range_start(mm, start, end);
Hugh Dickins508034a2005-10-29 18:16:30 -07001724 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07001725 for (address = start; address < end; address += sz) {
David Gibsonc7546f82005-08-05 11:59:35 -07001726 ptep = huge_pte_offset(mm, address);
Adam Litke4c887262005-10-29 18:16:46 -07001727 if (!ptep)
David Gibsonc7546f82005-08-05 11:59:35 -07001728 continue;
1729
Chen, Kenneth W39dde652006-12-06 20:32:03 -08001730 if (huge_pmd_unshare(mm, &address, ptep))
1731 continue;
1732
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001733 /*
1734 * If a reference page is supplied, it is because a specific
1735 * page is being unmapped, not a range. Ensure the page we
1736 * are about to unmap is the actual page of interest.
1737 */
1738 if (ref_page) {
1739 pte = huge_ptep_get(ptep);
1740 if (huge_pte_none(pte))
1741 continue;
1742 page = pte_page(pte);
1743 if (page != ref_page)
1744 continue;
1745
1746 /*
1747 * Mark the VMA as having unmapped its page so that
1748 * future faults in this VMA will fail rather than
1749			 * future faults in this VMA will fail rather than
1750			 * look like data was lost.
1751 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1752 }
1753
David Gibsonc7546f82005-08-05 11:59:35 -07001754 pte = huge_ptep_get_and_clear(mm, address, ptep);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001755 if (huge_pte_none(pte))
David Gibson63551ae2005-06-21 17:14:44 -07001756 continue;
David Gibsonc7546f82005-08-05 11:59:35 -07001757
David Gibson63551ae2005-06-21 17:14:44 -07001758 page = pte_page(pte);
Ken Chen6649a382007-02-08 14:20:27 -08001759 if (pte_dirty(pte))
1760 set_page_dirty(page);
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001761 list_add(&page->lru, &page_list);
David Gibson63551ae2005-06-21 17:14:44 -07001762 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 spin_unlock(&mm->page_table_lock);
Hugh Dickins508034a2005-10-29 18:16:30 -07001764 flush_tlb_range(vma, start, end);
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07001765 mmu_notifier_invalidate_range_end(mm, start, end);
Chen, Kenneth Wfe1668a2006-10-04 02:15:24 -07001766 list_for_each_entry_safe(page, tmp, &page_list, lru) {
1767 list_del(&page->lru);
1768 put_page(page);
1769 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770}
David Gibson63551ae2005-06-21 17:14:44 -07001771
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001772void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001773 unsigned long end, struct page *ref_page)
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001774{
Andi Kleena137e1c2008-07-23 21:27:43 -07001775 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1776 __unmap_hugepage_range(vma, start, end, ref_page);
1777 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
Chen, Kenneth W502717f2006-10-11 01:20:46 -07001778}
1779
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001780/*
1781 * This is called when the original mapper fails to COW a MAP_PRIVATE
1782 * mapping it owns the reserve page for. The intention is to unmap the page
1783 * from other VMAs and let the children be SIGKILLed if they are faulting the
1784 * same region.
1785 */
Harvey Harrison2a4b3de2008-10-18 20:27:06 -07001786static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
1787 struct page *page, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001788{
1789 struct vm_area_struct *iter_vma;
1790 struct address_space *mapping;
1791 struct prio_tree_iter iter;
1792 pgoff_t pgoff;
1793
1794 /*
1795	 * vm_pgoff is in PAGE_SIZE units, hence the calculation differs from
1796	 * the page cache lookup, which is in HPAGE_SIZE units.
1797 */
1798 address = address & huge_page_mask(hstate_vma(vma));
1799 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1800 + (vma->vm_pgoff >> PAGE_SHIFT);
1801 mapping = (struct address_space *)page_private(page);
1802
1803 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1804 /* Do not unmap the current VMA */
1805 if (iter_vma == vma)
1806 continue;
1807
1808 /*
1809 * Unmap the page from other VMAs without their own reserves.
1810 * They get marked to be SIGKILLed if they fault in these
1811 * areas. This is because a future no-page fault on this VMA
1812 * could insert a zeroed page instead of the data existing
1813 * from the time of fork. This would look like data corruption
1814		 * from the time of fork. This would look like data corruption.
1815 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1816 unmap_hugepage_range(iter_vma,
1817 address, address + HPAGE_SIZE,
1818 page);
1819 }
1820
1821 return 1;
1822}
1823
David Gibson1e8f8892006-01-06 00:10:44 -08001824static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001825 unsigned long address, pte_t *ptep, pte_t pte,
1826 struct page *pagecache_page)
David Gibson1e8f8892006-01-06 00:10:44 -08001827{
Andi Kleena5516432008-07-23 21:27:41 -07001828 struct hstate *h = hstate_vma(vma);
David Gibson1e8f8892006-01-06 00:10:44 -08001829 struct page *old_page, *new_page;
David Gibson79ac6ba2006-03-22 00:08:51 -08001830 int avoidcopy;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001831 int outside_reserve = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001832
1833 old_page = pte_page(pte);
1834
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001835retry_avoidcopy:
David Gibson1e8f8892006-01-06 00:10:44 -08001836 /* If no-one else is actually using this page, avoid the copy
1837 * and just make the page writable */
1838 avoidcopy = (page_count(old_page) == 1);
1839 if (avoidcopy) {
1840 set_huge_ptep_writable(vma, address, ptep);
Nick Piggin83c54072007-07-19 01:47:05 -07001841 return 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001842 }
1843
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001844 /*
1845 * If the process that created a MAP_PRIVATE mapping is about to
1846 * perform a COW due to a shared page count, attempt to satisfy
1847 * the allocation without using the existing reserves. The pagecache
1848 * page is used to determine if the reserve at this address was
1849	 * consumed or not. If reserves were used, a partially faulted mapping
1850 * at the time of fork() could consume its reserves on COW instead
1851 * of the full address range.
1852 */
1853 if (!(vma->vm_flags & VM_SHARED) &&
1854 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1855 old_page != pagecache_page)
1856 outside_reserve = 1;
1857
David Gibson1e8f8892006-01-06 00:10:44 -08001858 page_cache_get(old_page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001859 new_page = alloc_huge_page(vma, address, outside_reserve);
David Gibson1e8f8892006-01-06 00:10:44 -08001860
Adam Litke2fc39ce2007-11-14 16:59:39 -08001861 if (IS_ERR(new_page)) {
David Gibson1e8f8892006-01-06 00:10:44 -08001862 page_cache_release(old_page);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001863
1864 /*
1865 * If a process owning a MAP_PRIVATE mapping fails to COW,
1866 * it is due to references held by a child and an insufficient
1867		 * huge page pool. To guarantee the original mapper's
1868		 * reliability, unmap the page from child processes. The child
1869 * may get SIGKILLed if it later faults.
1870 */
1871 if (outside_reserve) {
1872 BUG_ON(huge_pte_none(pte));
1873 if (unmap_ref_private(mm, vma, old_page, address)) {
1874 BUG_ON(page_count(old_page) != 1);
1875 BUG_ON(huge_pte_none(pte));
1876 goto retry_avoidcopy;
1877 }
1878 WARN_ON_ONCE(1);
1879 }
1880
Adam Litke2fc39ce2007-11-14 16:59:39 -08001881 return -PTR_ERR(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08001882 }
1883
1884 spin_unlock(&mm->page_table_lock);
Atsushi Nemoto9de455b2006-12-12 17:14:55 +00001885 copy_huge_page(new_page, old_page, address, vma);
Nick Piggin0ed361d2008-02-04 22:29:34 -08001886 __SetPageUptodate(new_page);
David Gibson1e8f8892006-01-06 00:10:44 -08001887 spin_lock(&mm->page_table_lock);
1888
Andi Kleena5516432008-07-23 21:27:41 -07001889 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001890 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
David Gibson1e8f8892006-01-06 00:10:44 -08001891 /* Break COW */
Gerald Schaefer8fe627e2008-04-28 02:13:28 -07001892 huge_ptep_clear_flush(vma, address, ptep);
David Gibson1e8f8892006-01-06 00:10:44 -08001893 set_huge_pte_at(mm, address, ptep,
1894 make_huge_pte(vma, new_page, 1));
1895 /* Make the old page be freed below */
1896 new_page = old_page;
1897 }
1898 page_cache_release(new_page);
1899 page_cache_release(old_page);
Nick Piggin83c54072007-07-19 01:47:05 -07001900 return 0;
David Gibson1e8f8892006-01-06 00:10:44 -08001901}
1902
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001903/* Return the pagecache page at a given address within a VMA */
Andi Kleena5516432008-07-23 21:27:41 -07001904static struct page *hugetlbfs_pagecache_page(struct hstate *h,
1905 struct vm_area_struct *vma, unsigned long address)
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001906{
1907 struct address_space *mapping;
Andy Whitcrofte7c4b0b2008-07-23 21:27:26 -07001908 pgoff_t idx;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001909
1910 mapping = vma->vm_file->f_mapping;
Andi Kleena5516432008-07-23 21:27:41 -07001911 idx = vma_hugecache_offset(h, vma, address);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001912
1913 return find_lock_page(mapping, idx);
1914}
1915
Robert P. J. Daya1ed3dd2007-07-17 04:03:33 -07001916static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
David Gibson1e8f8892006-01-06 00:10:44 -08001917 unsigned long address, pte_t *ptep, int write_access)
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001918{
Andi Kleena5516432008-07-23 21:27:41 -07001919 struct hstate *h = hstate_vma(vma);
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001920 int ret = VM_FAULT_SIGBUS;
Andy Whitcrofte7c4b0b2008-07-23 21:27:26 -07001921 pgoff_t idx;
Adam Litke4c887262005-10-29 18:16:46 -07001922 unsigned long size;
Adam Litke4c887262005-10-29 18:16:46 -07001923 struct page *page;
1924 struct address_space *mapping;
David Gibson1e8f8892006-01-06 00:10:44 -08001925 pte_t new_pte;
Adam Litke4c887262005-10-29 18:16:46 -07001926
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001927 /*
1928 * Currently, we are forced to kill the process in the event the
1929 * original mapper has unmapped pages from the child due to a failed
1930	 * COW. Warn that such a situation has occurred as it may not be obvious.
1931 */
1932 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
1933 printk(KERN_WARNING
1934 "PID %d killed due to inadequate hugepage pool\n",
1935 current->pid);
1936 return ret;
1937 }
1938
Adam Litke4c887262005-10-29 18:16:46 -07001939 mapping = vma->vm_file->f_mapping;
Andi Kleena5516432008-07-23 21:27:41 -07001940 idx = vma_hugecache_offset(h, vma, address);
Adam Litke4c887262005-10-29 18:16:46 -07001941
1942 /*
1943 * Use page lock to guard against racing truncation
1944 * before we get page_table_lock.
1945 */
Christoph Lameter6bda6662006-01-06 00:10:49 -08001946retry:
1947 page = find_lock_page(mapping, idx);
1948 if (!page) {
Andi Kleena5516432008-07-23 21:27:41 -07001949 size = i_size_read(mapping->host) >> huge_page_shift(h);
Hugh Dickinsebed4bf2006-10-28 10:38:43 -07001950 if (idx >= size)
1951 goto out;
Mel Gorman04f2cbe2008-07-23 21:27:25 -07001952 page = alloc_huge_page(vma, address, 0);
Adam Litke2fc39ce2007-11-14 16:59:39 -08001953 if (IS_ERR(page)) {
1954 ret = -PTR_ERR(page);
Christoph Lameter6bda6662006-01-06 00:10:49 -08001955 goto out;
1956 }
Andi Kleena5516432008-07-23 21:27:41 -07001957 clear_huge_page(page, address, huge_page_size(h));
Nick Piggin0ed361d2008-02-04 22:29:34 -08001958 __SetPageUptodate(page);
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001959
Christoph Lameter6bda6662006-01-06 00:10:49 -08001960 if (vma->vm_flags & VM_SHARED) {
1961 int err;
Ken Chen45c682a2007-11-14 16:59:44 -08001962 struct inode *inode = mapping->host;
Christoph Lameter6bda6662006-01-06 00:10:49 -08001963
1964 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
1965 if (err) {
1966 put_page(page);
Christoph Lameter6bda6662006-01-06 00:10:49 -08001967 if (err == -EEXIST)
1968 goto retry;
1969 goto out;
1970 }
Ken Chen45c682a2007-11-14 16:59:44 -08001971
1972 spin_lock(&inode->i_lock);
Andi Kleena5516432008-07-23 21:27:41 -07001973 inode->i_blocks += blocks_per_huge_page(h);
Ken Chen45c682a2007-11-14 16:59:44 -08001974 spin_unlock(&inode->i_lock);
Christoph Lameter6bda6662006-01-06 00:10:49 -08001975 } else
1976 lock_page(page);
1977 }
David Gibson1e8f8892006-01-06 00:10:44 -08001978
Andy Whitcroft57303d82008-08-12 15:08:47 -07001979 /*
1980 * If we are going to COW a private mapping later, we examine the
1981 * pending reservations for this page now. This will ensure that
1982 * any allocations necessary to record that reservation occur outside
1983 * the spinlock.
1984 */
1985 if (write_access && !(vma->vm_flags & VM_SHARED))
Andy Whitcroft2b267362008-08-12 15:08:49 -07001986 if (vma_needs_reservation(h, vma, address) < 0) {
1987 ret = VM_FAULT_OOM;
1988 goto backout_unlocked;
1989 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07001990
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01001991 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07001992 size = i_size_read(mapping->host) >> huge_page_shift(h);
Adam Litke4c887262005-10-29 18:16:46 -07001993 if (idx >= size)
1994 goto backout;
1995
Nick Piggin83c54072007-07-19 01:47:05 -07001996 ret = 0;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07001997 if (!huge_pte_none(huge_ptep_get(ptep)))
Adam Litke4c887262005-10-29 18:16:46 -07001998 goto backout;
1999
David Gibson1e8f8892006-01-06 00:10:44 -08002000 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2001 && (vma->vm_flags & VM_SHARED)));
2002 set_huge_pte_at(mm, address, ptep, new_pte);
2003
2004 if (write_access && !(vma->vm_flags & VM_SHARED)) {
2005 /* Optimization, do the COW without a second fault */
Mel Gorman04f2cbe2008-07-23 21:27:25 -07002006 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
David Gibson1e8f8892006-01-06 00:10:44 -08002007 }
2008
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002009 spin_unlock(&mm->page_table_lock);
Adam Litke4c887262005-10-29 18:16:46 -07002010 unlock_page(page);
2011out:
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002012 return ret;
Adam Litke4c887262005-10-29 18:16:46 -07002013
2014backout:
2015 spin_unlock(&mm->page_table_lock);
Andy Whitcroft2b267362008-08-12 15:08:49 -07002016backout_unlocked:
Adam Litke4c887262005-10-29 18:16:46 -07002017 unlock_page(page);
2018 put_page(page);
2019 goto out;
Hugh Dickinsac9b9c62005-10-20 16:24:28 +01002020}
2021
Adam Litke86e52162006-01-06 00:10:43 -08002022int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2023 unsigned long address, int write_access)
2024{
2025 pte_t *ptep;
2026 pte_t entry;
David Gibson1e8f8892006-01-06 00:10:44 -08002027 int ret;
Andy Whitcroft57303d82008-08-12 15:08:47 -07002028 struct page *pagecache_page = NULL;
David Gibson3935baa2006-03-22 00:08:53 -08002029 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
Andi Kleena5516432008-07-23 21:27:41 -07002030 struct hstate *h = hstate_vma(vma);
Adam Litke86e52162006-01-06 00:10:43 -08002031
Andi Kleena5516432008-07-23 21:27:41 -07002032 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
Adam Litke86e52162006-01-06 00:10:43 -08002033 if (!ptep)
2034 return VM_FAULT_OOM;
2035
David Gibson3935baa2006-03-22 00:08:53 -08002036 /*
2037 * Serialize hugepage allocation and instantiation, so that we don't
2038 * get spurious allocation failures if two CPUs race to instantiate
2039 * the same page in the page cache.
2040 */
2041 mutex_lock(&hugetlb_instantiation_mutex);
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002042 entry = huge_ptep_get(ptep);
2043 if (huge_pte_none(entry)) {
David Gibson3935baa2006-03-22 00:08:53 -08002044 ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
David Gibsonb4d1d992008-10-15 22:01:11 -07002045 goto out_mutex;
David Gibson3935baa2006-03-22 00:08:53 -08002046 }
Adam Litke86e52162006-01-06 00:10:43 -08002047
Nick Piggin83c54072007-07-19 01:47:05 -07002048 ret = 0;
David Gibson1e8f8892006-01-06 00:10:44 -08002049
Andy Whitcroft57303d82008-08-12 15:08:47 -07002050 /*
2051 * If we are going to COW the mapping later, we examine the pending
2052 * reservations for this page now. This will ensure that any
2053 * allocations necessary to record that reservation occur outside the
2054 * spinlock. For private mappings, we also lookup the pagecache
2055 * page now as it is used to determine if a reservation has been
2056 * consumed.
2057 */
2058 if (write_access && !pte_write(entry)) {
Andy Whitcroft2b267362008-08-12 15:08:49 -07002059 if (vma_needs_reservation(h, vma, address) < 0) {
2060 ret = VM_FAULT_OOM;
David Gibsonb4d1d992008-10-15 22:01:11 -07002061 goto out_mutex;
Andy Whitcroft2b267362008-08-12 15:08:49 -07002062 }
Andy Whitcroft57303d82008-08-12 15:08:47 -07002063
2064 if (!(vma->vm_flags & VM_SHARED))
2065 pagecache_page = hugetlbfs_pagecache_page(h,
2066 vma, address);
2067 }
2068
David Gibson1e8f8892006-01-06 00:10:44 -08002069 spin_lock(&mm->page_table_lock);
2070 /* Check for a racing update before calling hugetlb_cow */
David Gibsonb4d1d992008-10-15 22:01:11 -07002071 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2072 goto out_page_table_lock;
2073
2074
2075 if (write_access) {
2076 if (!pte_write(entry)) {
Andy Whitcroft57303d82008-08-12 15:08:47 -07002077 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2078 pagecache_page);
David Gibsonb4d1d992008-10-15 22:01:11 -07002079 goto out_page_table_lock;
2080 }
2081 entry = pte_mkdirty(entry);
2082 }
2083 entry = pte_mkyoung(entry);
2084 if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
2085 update_mmu_cache(vma, address, entry);
2086
2087out_page_table_lock:
David Gibson1e8f8892006-01-06 00:10:44 -08002088 spin_unlock(&mm->page_table_lock);
Andy Whitcroft57303d82008-08-12 15:08:47 -07002089
2090 if (pagecache_page) {
2091 unlock_page(pagecache_page);
2092 put_page(pagecache_page);
2093 }
2094
David Gibsonb4d1d992008-10-15 22:01:11 -07002095out_mutex:
David Gibson3935baa2006-03-22 00:08:53 -08002096 mutex_unlock(&hugetlb_instantiation_mutex);
David Gibson1e8f8892006-01-06 00:10:44 -08002097
2098 return ret;
Adam Litke86e52162006-01-06 00:10:43 -08002099}
2100
Andi Kleenceb86872008-07-23 21:27:50 -07002101/* Can be overridden by architectures */
2102__attribute__((weak)) struct page *
2103follow_huge_pud(struct mm_struct *mm, unsigned long address,
2104 pud_t *pud, int write)
2105{
2106 BUG();
2107 return NULL;
2108}
2109
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002110static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
2111{
2112 if (!ptep || write || shared)
2113 return 0;
2114 else
2115 return huge_pte_none(huge_ptep_get(ptep));
2116}
2117
David Gibson63551ae2005-06-21 17:14:44 -07002118int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2119 struct page **pages, struct vm_area_struct **vmas,
Adam Litke5b23dbe2007-11-14 16:59:33 -08002120 unsigned long *position, int *length, int i,
2121 int write)
David Gibson63551ae2005-06-21 17:14:44 -07002122{
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002123 unsigned long pfn_offset;
2124 unsigned long vaddr = *position;
David Gibson63551ae2005-06-21 17:14:44 -07002125 int remainder = *length;
Andi Kleena5516432008-07-23 21:27:41 -07002126 struct hstate *h = hstate_vma(vma);
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002127 int zeropage_ok = 0;
2128 int shared = vma->vm_flags & VM_SHARED;
David Gibson63551ae2005-06-21 17:14:44 -07002129
Hugh Dickins1c598272005-10-19 21:23:43 -07002130 spin_lock(&mm->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07002131 while (vaddr < vma->vm_end && remainder) {
Adam Litke4c887262005-10-29 18:16:46 -07002132 pte_t *pte;
2133 struct page *page;
2134
2135 /*
2136 * Some archs (sparc64, sh*) have multiple pte_ts to
2137		 * each hugepage. We have to make sure we get the
2138 * first, for the page indexing below to work.
2139 */
Andi Kleena5516432008-07-23 21:27:41 -07002140 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002141 if (huge_zeropage_ok(pte, write, shared))
2142 zeropage_ok = 1;
Adam Litke4c887262005-10-29 18:16:46 -07002143
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002144 if (!pte ||
2145 (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002146 (write && !pte_write(huge_ptep_get(pte)))) {
Adam Litke4c887262005-10-29 18:16:46 -07002147 int ret;
2148
2149 spin_unlock(&mm->page_table_lock);
Adam Litke5b23dbe2007-11-14 16:59:33 -08002150 ret = hugetlb_fault(mm, vma, vaddr, write);
Adam Litke4c887262005-10-29 18:16:46 -07002151 spin_lock(&mm->page_table_lock);
Adam Litkea89182c2007-08-22 14:01:51 -07002152 if (!(ret & VM_FAULT_ERROR))
Adam Litke4c887262005-10-29 18:16:46 -07002153 continue;
2154
2155 remainder = 0;
2156 if (!i)
2157 i = -EFAULT;
2158 break;
2159 }
David Gibson63551ae2005-06-21 17:14:44 -07002160
Andi Kleena5516432008-07-23 21:27:41 -07002161 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002162 page = pte_page(huge_ptep_get(pte));
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002163same_page:
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08002164 if (pages) {
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002165 if (zeropage_ok)
2166 pages[i] = ZERO_PAGE(0);
2167 else
Andy Whitcroft69d177c2008-11-06 12:53:26 -08002168 pages[i] = mem_map_offset(page, pfn_offset);
KOSAKI Motohiro4b2e38a2008-10-18 20:27:10 -07002169 get_page(pages[i]);
Chen, Kenneth Wd6692182006-03-31 02:29:57 -08002170 }
David Gibson63551ae2005-06-21 17:14:44 -07002171
2172 if (vmas)
2173 vmas[i] = vma;
2174
2175 vaddr += PAGE_SIZE;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002176 ++pfn_offset;
David Gibson63551ae2005-06-21 17:14:44 -07002177 --remainder;
2178 ++i;
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002179 if (vaddr < vma->vm_end && remainder &&
Andi Kleena5516432008-07-23 21:27:41 -07002180 pfn_offset < pages_per_huge_page(h)) {
Chen, Kenneth Wd5d4b0a2006-03-22 00:09:03 -08002181 /*
2182 * We use pfn_offset to avoid touching the pageframes
2183 * of this compound page.
2184 */
2185 goto same_page;
2186 }
David Gibson63551ae2005-06-21 17:14:44 -07002187 }
Hugh Dickins1c598272005-10-19 21:23:43 -07002188 spin_unlock(&mm->page_table_lock);
David Gibson63551ae2005-06-21 17:14:44 -07002189 *length = remainder;
2190 *position = vaddr;
2191
2192 return i;
2193}
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002194
2195void hugetlb_change_protection(struct vm_area_struct *vma,
2196 unsigned long address, unsigned long end, pgprot_t newprot)
2197{
2198 struct mm_struct *mm = vma->vm_mm;
2199 unsigned long start = address;
2200 pte_t *ptep;
2201 pte_t pte;
Andi Kleena5516432008-07-23 21:27:41 -07002202 struct hstate *h = hstate_vma(vma);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002203
2204 BUG_ON(address >= end);
2205 flush_cache_range(vma, address, end);
2206
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002207 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002208 spin_lock(&mm->page_table_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002209 for (; address < end; address += huge_page_size(h)) {
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002210 ptep = huge_pte_offset(mm, address);
2211 if (!ptep)
2212 continue;
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002213 if (huge_pmd_unshare(mm, &address, ptep))
2214 continue;
Gerald Schaefer7f2e9522008-04-28 02:13:29 -07002215 if (!huge_pte_none(huge_ptep_get(ptep))) {
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002216 pte = huge_ptep_get_and_clear(mm, address, ptep);
2217 pte = pte_mkhuge(pte_modify(pte, newprot));
2218 set_huge_pte_at(mm, address, ptep, pte);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002219 }
2220 }
2221 spin_unlock(&mm->page_table_lock);
Chen, Kenneth W39dde652006-12-06 20:32:03 -08002222 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
Zhang, Yanmin8f860592006-03-22 00:08:50 -08002223
2224 flush_tlb_range(vma, start, end);
2225}
2226
Mel Gormana1e78772008-07-23 21:27:23 -07002227int hugetlb_reserve_pages(struct inode *inode,
2228 long from, long to,
2229 struct vm_area_struct *vma)
Adam Litkee4e574b2007-10-16 01:26:19 -07002230{
2231 long ret, chg;
Andi Kleena5516432008-07-23 21:27:41 -07002232 struct hstate *h = hstate_inode(inode);
Adam Litkee4e574b2007-10-16 01:26:19 -07002233
Andy Whitcroftc37f9fb2008-07-23 21:27:30 -07002234 if (vma && vma->vm_flags & VM_NORESERVE)
2235 return 0;
2236
Mel Gormana1e78772008-07-23 21:27:23 -07002237 /*
2238 * Shared mappings base their reservation on the number of pages that
2239 * are already allocated on behalf of the file. Private mappings need
2240 * to reserve the full area even if read-only as mprotect() may be
2241 * called to make the mapping read-write. Assume !vma is a shm mapping
2242 */
2243 if (!vma || vma->vm_flags & VM_SHARED)
2244 chg = region_chg(&inode->i_mapping->private_list, from, to);
2245 else {
Andy Whitcroft84afd992008-07-23 21:27:32 -07002246 struct resv_map *resv_map = resv_map_alloc();
2247 if (!resv_map)
2248 return -ENOMEM;
2249
Mel Gormana1e78772008-07-23 21:27:23 -07002250 chg = to - from;
Andy Whitcroft84afd992008-07-23 21:27:32 -07002251
2252 set_vma_resv_map(vma, resv_map);
Mel Gorman04f2cbe2008-07-23 21:27:25 -07002253 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
Mel Gormana1e78772008-07-23 21:27:23 -07002254 }
2255
Adam Litkee4e574b2007-10-16 01:26:19 -07002256 if (chg < 0)
2257 return chg;
Ken Chen8a630112007-05-09 02:33:34 -07002258
Adam Litke90d8b7e2007-11-14 16:59:42 -08002259 if (hugetlb_get_quota(inode->i_mapping, chg))
2260 return -ENOSPC;
Andi Kleena5516432008-07-23 21:27:41 -07002261 ret = hugetlb_acct_memory(h, chg);
Ken Chen68842c92008-01-14 00:55:19 -08002262 if (ret < 0) {
2263 hugetlb_put_quota(inode->i_mapping, chg);
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002264 return ret;
Ken Chen68842c92008-01-14 00:55:19 -08002265 }
Mel Gormana1e78772008-07-23 21:27:23 -07002266 if (!vma || vma->vm_flags & VM_SHARED)
2267 region_add(&inode->i_mapping->private_list, from, to);
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002268 return 0;
2269}
2270
2271void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2272{
Andi Kleena5516432008-07-23 21:27:41 -07002273 struct hstate *h = hstate_inode(inode);
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002274 long chg = region_truncate(&inode->i_mapping->private_list, offset);
Ken Chen45c682a2007-11-14 16:59:44 -08002275
2276 spin_lock(&inode->i_lock);
Andi Kleena5516432008-07-23 21:27:41 -07002277 inode->i_blocks -= blocks_per_huge_page(h);
Ken Chen45c682a2007-11-14 16:59:44 -08002278 spin_unlock(&inode->i_lock);
2279
Adam Litke90d8b7e2007-11-14 16:59:42 -08002280 hugetlb_put_quota(inode->i_mapping, (chg - freed));
Andi Kleena5516432008-07-23 21:27:41 -07002281 hugetlb_acct_memory(h, -(chg - freed));
Chen, Kenneth Wa43a8c32006-06-23 02:03:15 -07002282}