#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

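/*
 * Typical usage (an illustrative sketch, not an interface defined here): a
 * filesystem's writeback completion path records the failure so that a later
 * fsync(2) on any open file can report it.  The surrounding helper is
 * hypothetical; only mapping_set_error() and end_page_writeback() are real.
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */
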
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

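/*
 * Illustrative sketch: a caller that must not recurse into the filesystem
 * (e.g. while holding fs locks) can narrow an allocation to the intersection
 * of GFP_NOFS and whatever the mapping permits:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_NOFS);
 *	struct page *page = __page_cache_alloc(gfp);
 *
 * __page_cache_alloc() is declared further down in this header.
 */
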
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

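/*
 * Illustrative sketch of the lookup-side pattern (steps 1-3 above), loosely
 * modelled on mm/filemap.c; exceptional entries and slot rechecks are
 * omitted for brevity:
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	   (step 1)
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		   (step 2)
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);				   (step 3 failed)
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */
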
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

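/*
 * Typical call pattern (illustrative only); the reference must be dropped
 * with put_page() when the caller is done:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... inspect or copy from the page ...
 *		put_page(page);
 *	}
 */
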
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

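/*
 * Illustrative sketch of a common caller: grab (or create) the locked page,
 * bring it uptodate if necessary, then unlock and drop the reference.  The
 * "fill the page" step is whatever the caller needs and is elided here:
 *
 *	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	if (!PageUptodate(page)) {
 *		... fill the page ...
 *		SetPageUptodate(page);
 *	}
 *	unlock_page(page);
 *	put_page(page);
 */
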
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

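/*
 * Illustrative sketch: read_mapping_page() returns an uptodate page with an
 * elevated refcount, or an ERR_PTR() on failure; the data argument is handed
 * to the filler and is often just NULL:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... kmap() and examine the contents ...
 *	put_page(page);
 */
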
/*
 * Get the index of the page within the radix-tree
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the page's offset in its mapping, in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

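/*
 * Worked example (illustrative numbers only): for a VMA with
 * vm_start = 0x7f0000000000 and vm_pgoff = 16, the address
 * vm_start + 3 * PAGE_SIZE maps to page cache index 3 + 16 = 19,
 * i.e. byte offset 19 << PAGE_SHIFT within the file.
 */
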
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

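/*
 * Illustrative sketch: callers usually already hold a reference (e.g. from
 * find_get_page()) and must drop it on failure; once locked, the page may
 * have been truncated, so the mapping is rechecked ("retry" is a hypothetical
 * label in the caller):
 *
 *	if (lock_page_killable(page)) {
 *		put_page(page);
 *		return -EINTR;
 *	}
 *	if (page->mapping != mapping) {
 *		unlock_page(page);
 *		put_page(page);
 *		goto retry;
 *	}
 */
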
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

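/*
 * Illustrative sketch of a ->page_mkwrite() style caller (compare
 * filemap_page_mkwrite() in mm/filemap.c): the page is locked, checked
 * against truncation, dirtied, and then wait_for_stable_page() blocks if the
 * backing device wants stable pages while writeback is in flight:
 *
 *	lock_page(page);
 *	if (page->mapping != inode->i_mapping) {
 *		unlock_page(page);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	set_page_dirty(page);
 *	wait_for_stable_page(page);
 *	return VM_FAULT_LOCKED;
 */
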
void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

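/*
 * Illustrative sketch: write paths typically fault the user buffer in before
 * taking page locks, so that a later atomic copy cannot deadlock on a page
 * the caller itself has locked:
 *
 *	if (fault_in_pages_readable(buf, count))
 *		return -EFAULT;
 *	... lock page cache pages and copy from buf ...
 */
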
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

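/*
 * Illustrative sketch of the usual "allocate, then insert" pattern (roughly
 * what readahead does): the new page goes into the cache locked and on the
 * LRU, and is dropped if someone else already instantiated that index:
 *
 *	struct page *page = __page_cache_alloc(gfp_mask);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
 *		put_page(page);
 *		return 0;
 *	}
 *	... start I/O for the new page; unlock_page() when it is uptodate ...
 */
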
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */