#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

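/*
 * Illustrative sketch (not part of this header): a writeback completion
 * handler would typically record a failed asynchronous write like this, so
 * that a later fsync()/msync() can report the error once the
 * filemap_fdatawait() paths test and clear AS_EIO/AS_ENOSPC:
 *
 *	if (!uptodate)
 *		mapping_set_error(page->mapping, -EIO);
 *	end_page_writeback(page);
 */
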
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

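/*
 * Illustrative sketch (not part of this header): a filesystem that must not
 * recurse into itself during page cache allocations can narrow the mapping's
 * allocation mask while setting up an inode, before the mapping is used:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */
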
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

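/*
 * Illustrative sketch (not part of this header) of the lookup side described
 * above, roughly what mm/filemap.c's find_get_page() does under RCU:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page && !page_cache_get_speculative(page))
 *		goto repeat;			(step 2 failed, retry step 1)
 *	if (page && page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *		page_cache_release(page);	(step 3: page was replaced)
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */
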
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

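/*
 * Illustrative sketch (not part of this header): the remove side described
 * above is roughly what reclaim's __remove_mapping() does with
 * mapping->tree_lock held:
 *
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;		(an extra reference exists, bail)
 *	if (unlikely(PageDirty(page))) {
 *		page_unfreeze_refs(page, 2);	(put the references back)
 *		goto cannot_free;
 *	}
 *	__delete_from_page_cache(page);
 */
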
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

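/*
 * Illustrative sketch (not part of this header): the page returned by
 * find_get_page() carries a reference but is not locked, so a typical
 * caller looks like:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the (pinned, possibly not uptodate) page ...
 *		page_cache_release(page);
 *	}
 *
 * find_lock_page() and grab_cache_page() additionally return the page
 * locked, so the caller must unlock_page() before releasing it.
 */
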
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

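/*
 * Illustrative sketch (not part of this header): read_mapping_page() returns
 * either an uptodate page or an ERR_PTR() on read failure, so callers
 * typically do:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page ...
 *	page_cache_release(page);
 */
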
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

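/*
 * Worked example (illustrative, assuming 4K pages so PAGE_CACHE_SHIFT ==
 * PAGE_SHIFT): for a VMA with vm_start == 0x40000000 and vm_pgoff == 16,
 * the address 0x40003000 lies 3 pages into the VMA, so linear_page_index()
 * returns 3 + 16 = 19 - the page cache index backing that address.
 */
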
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

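/*
 * Illustrative sketch (not part of this header): a read path that may sleep
 * for a long time prefers the killable variant so a fatal signal can break
 * the wait:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		page_cache_release(page);
 *		return error;		(-EINTR: the task is being killed)
 *	}
 *	... page is now locked ...
 *	unlock_page(page);
 */
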
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

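/*
 * Illustrative sketch (not part of this header): truncate and data-integrity
 * paths typically take both exclusions before touching page contents:
 *
 *	lock_page(page);			(block new I/O and lookups)
 *	wait_on_page_writeback(page);		(wait out I/O already in flight)
 *	... modify or invalidate the page ...
 *	unlock_page(page);
 */
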
extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

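/*
 * Illustrative sketch (not part of this header): the generic buffered write
 * path (see generic_perform_write() in mm/filemap.c) pre-faults the source
 * buffer through this helper before taking the page lock in ->write_begin(),
 * so a later copy from userspace with the page locked cannot deadlock on a
 * fault against the very page being written. Roughly:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);
 */
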
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

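/*
 * Illustrative sketch (not part of this header): a readahead-style path
 * allocates a fresh page, inserts it (locked) into the page cache and the
 * LRU, and then kicks off the read:
 *
 *	page = page_cache_alloc_readahead(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache_lru(page, mapping, index,
 *				      mapping_gfp_mask(mapping));
 *	if (error) {
 *		page_cache_release(page);	(e.g. -EEXIST: already cached)
 *		return error;
 *	}
 *	error = mapping->a_ops->readpage(file, page);	(unlocks the page)
 *	page_cache_release(page);
 */
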
#endif /* _LINUX_PAGEMAP_H */