#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
	AS_EXITING	= __GFP_BITS_SHIFT + 5,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
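
/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * completion path typically records asynchronous write errors with
 * mapping_set_error() so that a later fsync()/msync() can report -EIO or
 * -ENOSPC to userspace.  example_end_writeback() is a hypothetical helper:
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		struct address_space *mapping = page->mapping;
 *
 *		if (err)
 *			mapping_set_error(mapping, err);
 *		end_page_writeback(page);
 *	}
 */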

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
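
/*
 * Illustrative sketch (not part of this header): a filesystem that must not
 * recurse into itself during reclaim can restrict its page cache allocations
 * while setting up an inode, for example:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * Only the allocation-mode bits stored in mapping->flags are replaced; the
 * AS_* bits above are left untouched.
 */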

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
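
/*
 * Illustrative sketch (not part of this header) of the lookup-side pattern
 * described above, roughly what mm/filemap.c does in find_get_entry().  The
 * example_lookup() name and the omission of radix-tree exception handling
 * are simplifications made here for brevity:
 *
 *	static struct page *example_lookup(struct address_space *mapping,
 *					   pgoff_t offset)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *		if (page) {
 *			if (!page_cache_get_speculative(page))		// step 2
 *				goto repeat;
 *			// step 3: has the page been removed or replaced?
 *			if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *							       offset))) {
 *				page_cache_release(page);
 *				goto repeat;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return page;
 *	}
 */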

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
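
/*
 * Illustrative sketch (not part of this header) of the remove-side steps A-C
 * from the lockless pagecache comment above, loosely modelled on what reclaim
 * does in mm/vmscan.c.  example_try_remove() is a hypothetical name, error
 * handling is trimmed, and the expected refcount of 2 assumes one reference
 * held by the page cache plus one held by the caller:
 *
 *	static int example_try_remove(struct address_space *mapping,
 *				      struct page *page)
 *	{
 *		spin_lock_irq(&mapping->tree_lock);
 *		if (!page_freeze_refs(page, 2)) {		// step A
 *			spin_unlock_irq(&mapping->tree_lock);
 *			return 0;	// another reference exists, bail out
 *		}
 *		__delete_from_page_cache(page, NULL);		// step B
 *		spin_unlock_irq(&mapping->tree_lock);
 *		// step C: the refcount is now zero, so the caller hands the
 *		// page straight back to the page allocator, as reclaim does;
 *		// put_page() must not be used on a frozen page.
 *		return 1;
 *	}
 */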
229
Paul Jackson44110fe2006-03-24 03:16:04 -0800230#ifdef CONFIG_NUMA
Nick Piggin2ae88142006-10-28 10:38:23 -0700231extern struct page *__page_cache_alloc(gfp_t gfp);
Paul Jackson44110fe2006-03-24 03:16:04 -0800232#else
Nick Piggin2ae88142006-10-28 10:38:23 -0700233static inline struct page *__page_cache_alloc(gfp_t gfp)
234{
235 return alloc_pages(gfp, 0);
236}
237#endif
238
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239static inline struct page *page_cache_alloc(struct address_space *x)
240{
Nick Piggin2ae88142006-10-28 10:38:23 -0700241 return __page_cache_alloc(mapping_gfp_mask(x));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242}
243
244static inline struct page *page_cache_alloc_cold(struct address_space *x)
245{
Nick Piggin2ae88142006-10-28 10:38:23 -0700246 return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247}
248
Wu Fengguang7b1de582011-05-24 17:12:25 -0700249static inline struct page *page_cache_alloc_readahead(struct address_space *x)
250{
251 return __page_cache_alloc(mapping_gfp_mask(x) |
252 __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
253}
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255typedef int filler_t(void *, struct page *);
256
Johannes Weinere7b563b2014-04-03 14:47:44 -0700257pgoff_t page_cache_next_hole(struct address_space *mapping,
258 pgoff_t index, unsigned long max_scan);
259pgoff_t page_cache_prev_hole(struct address_space *mapping,
260 pgoff_t index, unsigned long max_scan);
261
Mel Gorman2457aec2014-06-04 16:10:31 -0700262#define FGP_ACCESSED 0x00000001
263#define FGP_LOCK 0x00000002
264#define FGP_CREAT 0x00000004
265#define FGP_WRITE 0x00000008
266#define FGP_NOFS 0x00000010
267#define FGP_NOWAIT 0x00000020
268
269struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
270 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
271
272/**
273 * find_get_page - find and get a page reference
274 * @mapping: the address_space to search
275 * @offset: the page index
276 *
277 * Looks up the page cache slot at @mapping & @offset. If there is a
278 * page cache page, it is returned with an increased refcount.
279 *
280 * Otherwise, %NULL is returned.
281 */
282static inline struct page *find_get_page(struct address_space *mapping,
283 pgoff_t offset)
284{
285 return pagecache_get_page(mapping, offset, 0, 0, 0);
286}
287
288static inline struct page *find_get_page_flags(struct address_space *mapping,
289 pgoff_t offset, int fgp_flags)
290{
291 return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
292}
293
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
}
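
/*
 * Illustrative sketch (not part of this header): a typical caller grabs the
 * page locked, operates on it, then unlocks it and drops its reference.
 * example_touch_block() is a hypothetical helper:
 *
 *	static int example_touch_block(struct address_space *mapping,
 *				       pgoff_t index)
 *	{
 *		struct page *page;
 *
 *		page = find_or_create_page(mapping, index,
 *					   mapping_gfp_mask(mapping));
 *		if (!page)
 *			return -ENOMEM;
 *		// ... fill or consult the locked page here ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *		return 0;
 *	}
 */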

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping),
			GFP_NOFS);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
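
/*
 * Illustrative sketch (not part of this header): callers that may block for
 * a long time typically prefer lock_page_killable() so a fatal signal can
 * abort the wait; example_wait_for_page() is a hypothetical helper:
 *
 *	static int example_wait_for_page(struct page *page)
 *	{
 *		int err = lock_page_killable(page);
 *
 *		if (err)
 *			return err;	// -EINTR: caller should back out
 *		// ... page is now locked ...
 *		unlock_page(page);
 *		return 0;
 *	}
 */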

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
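
/*
 * Illustrative sketch (not part of this header): the classic write-path
 * pattern is to prefault the user buffer *before* taking the page lock, then
 * perform the copy with page faults disabled so it cannot recurse into the
 * filesystem while the page is locked (compare generic_perform_write() in
 * mm/filemap.c).  example_copy_in() is a hypothetical helper:
 *
 *	static size_t example_copy_in(struct page *page, unsigned long offset,
 *				      const char __user *buf, unsigned bytes)
 *	{
 *		char *kaddr;
 *		size_t left;
 *
 *		if (fault_in_pages_readable(buf, bytes))
 *			return 0;	// caller retries or returns -EFAULT
 *
 *		kaddr = kmap_atomic(page);
 *		pagefault_disable();
 *		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *		pagefault_enable();
 *		kunmap_atomic(kaddr);
 *
 *		return bytes - left;	// number of bytes actually copied
 *	}
 */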

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
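
/*
 * Illustrative sketch (not part of this header): read-side callers usually
 * allocate a fresh page, insert it with add_to_page_cache_lru() (so it also
 * lands on the LRU), and fall back to a lookup when somebody else filled the
 * slot in the meantime.  example_get_or_add() is a hypothetical helper and
 * error handling is abbreviated:
 *
 *	static struct page *example_get_or_add(struct address_space *mapping,
 *					       pgoff_t index, gfp_t gfp)
 *	{
 *		struct page *page = find_get_page(mapping, index);
 *
 *		if (page)
 *			return page;
 *		page = __page_cache_alloc(gfp);
 *		if (!page)
 *			return NULL;
 *		if (add_to_page_cache_lru(page, mapping, index, gfp)) {
 *			// likely -EEXIST: somebody beat us to it, retry lookup
 *			page_cache_release(page);
 *			return find_get_page(mapping, index);
 *		}
 *		// success: the new page is locked and holds our reference;
 *		// the caller would now fill it (e.g. issue a read) and then
 *		// unlock_page() it.
 *		return page;
 *	}
 */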
666
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667#endif /* _LINUX_PAGEMAP_H */