#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
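
/*
 * Illustrative sketch (not part of this header): an async writeback
 * completion path would typically record failures with mapping_set_error()
 * so that a later fsync()/msync() can report them.  The handler below is
 * hypothetical:
 *
 *	static void my_end_writeback(struct page *page, int error)
 *	{
 *		if (error)
 *			mapping_set_error(page->mapping, error);
 *		end_page_writeback(page);
 *	}
 */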

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
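
/*
 * Illustrative sketch (not part of this header): a filesystem that must not
 * recurse into itself during reclaim can mask off __GFP_FS for a mapping's
 * page allocations when the inode is set up, e.g.:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */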

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
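
/*
 * Worked example, assuming 4K pages (PAGE_CACHE_SIZE == 0x1000,
 * PAGE_CACHE_MASK == ~0xfffUL):
 *
 *	PAGE_CACHE_ALIGN(0x1234) == (0x1234 + 0xfff) & ~0xfff == 0x2000
 *	PAGE_CACHE_ALIGN(0x2000) == (0x2000 + 0xfff) & ~0xfff == 0x2000
 */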

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
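
/*
 * Illustrative sketch of the lookup-side protocol above (a much simplified
 * find_get_page(); the real lockless version lives in mm/filemap.c):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	   (step 1)
 *	if (page && !page_cache_get_speculative(page))
 *		goto repeat;					   (step 2 failed)
 *	if (page && page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *		page_cache_release(page);			   (step 3 failed)
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */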

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
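
/*
 * Illustrative sketch of the remove side (steps A-C above; a much simplified
 * version of what __remove_mapping() in mm/vmscan.c does, assuming the
 * caller holds exactly one reference besides the pagecache's):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))		   (A failed: extra references)
 *		goto cannot_free;
 *	__remove_from_page_cache(page);		   (B)
 *	spin_unlock_irq(&mapping->tree_lock);
 *	... free the page ...			   (C)
 */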

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
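
/*
 * Illustrative sketch (not part of this header): callers of
 * read_mapping_page() get either an uptodate page or an ERR_PTR():
 *
 *	struct page *page = read_mapping_page(mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the uptodate page ...
 *	page_cache_release(page);
 */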

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
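
/*
 * Worked example for linear_page_index(), assuming 4K pages: for a vma with
 * vm_start == 0x10000 and vm_pgoff == 3, address 0x12345 lies
 * (0x12345 - 0x10000) >> 12 == 2 pages into the vma, so the result is
 * 3 + 2 == 5: the sixth page of the mapped object.
 */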

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
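
/*
 * Illustrative sketch (not part of this header): paths that may wait a long
 * time for the page lock would use the killable variant like so:
 *
 *	error = lock_page_killable(page);
 *	if (error)
 *		return error;	   (-EINTR: fatally signalled while waiting)
 *	... operate on the locked page ...
 *	unlock_page(page);
 */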

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
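
/*
 * Illustrative sketch (not part of this header): the buffered write path
 * faults the source buffer in *before* taking the destination page lock, so
 * that the later atomic usercopy cannot deadlock on a page fault (this is
 * roughly what generic_perform_write() does via iov_iter_fault_in_readable()):
 *
 *	fault_in_pages_readable(buf, bytes);
 *	... find and lock the destination pagecache page ...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */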

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
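
/*
 * Illustrative sketch (not part of this header): readahead-style code
 * allocates a fresh page and inserts it, tolerating a racing insertion at
 * the same index:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (error) {
 *		page_cache_release(page);
 *		if (error == -EEXIST)
 *			error = 0;	   (someone else added it first)
 *	}
 */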

#endif /* _LINUX_PAGEMAP_H */