#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
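
/*
 * Illustrative sketch (not part of this header): filesystems typically call
 * mapping_set_error() from a writeback completion path so that a later
 * fsync()/msync() can report the failure.  The error variable below is an
 * assumption for the example; the exact source of the error code varies by
 * kernel version and driver:
 *
 *	if (write_error)
 *		mapping_set_error(page->mapping, write_error);
 *	end_page_writeback(page);
 *
 * The latched AS_EIO/AS_ENOSPC bit is test-and-cleared when the file is next
 * synced, turning an asynchronous write error into a syscall return value.
 */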

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
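
/*
 * Illustrative usage (assumption, not taken from this file): a filesystem
 * that must not recurse into itself while allocating pagecache pages can
 * narrow its allocation context with the mapping's own mask, e.g.:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);
 *	page = __page_cache_alloc(gfp);
 *
 * The result can never be more permissive than mapping->gfp_mask.
 */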

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
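
/*
 * Illustrative sketch of the lookup side of the protocol above, loosely
 * modelled on find_get_entry() in mm/filemap.c but simplified (the real code
 * uses radix_tree_lookup_slot()/radix_tree_deref_slot() and also handles
 * exceptional entries):
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;	// page was freed under us, look again
 *		// step 3: recheck that the page is still at this index
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *						       offset))) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */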

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}
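
/*
 * Illustrative (simplified; details assumed rather than taken from this
 * file): readahead allocates cache-cold pages and treats allocation failure
 * as a lost optimisation rather than an error, which is why the mask above
 * adds __GFP_NORETRY | __GFP_NOWARN:
 *
 *	page = __page_cache_alloc(readahead_gfp_mask(mapping));
 *	if (!page)
 *		break;	// give up on further readahead, silently
 */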

typedef int filler_t(struct file *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
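
/*
 * Typical usage (illustrative only; the fallback helper is hypothetical):
 * the reference returned by find_get_page() must be dropped with put_page()
 * once the caller is done with the page:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		return handle_cache_miss(mapping, index);
 *	// ... use the (unlocked) page ...
 *	put_page(page);
 */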

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
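
/*
 * Illustrative usage (assumption, not taken from this file): the page comes
 * back locked, so the caller both unlocks and releases it:
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	// ... fill or modify the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */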

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, int tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, int tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);
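
/*
 * Illustrative sketch of a tagged gang lookup, in the style of a
 * write_cache_pages()-like writeback loop (simplified; real callers use
 * pagevecs and handle many more corner cases).  find_get_pages_range_tag()
 * advances *index past the last page it returned, so the loop resumes where
 * it left off:
 *
 *	struct page *pages[16];
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range_tag(mapping, &index, end,
 *					      PAGECACHE_TAG_DIRTY,
 *					      16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			lock_page(pages[i]);
 *			// ... write the page back ...
 *			unlock_page(pages[i]);
 *			put_page(pages[i]);
 *		}
 *	}
 */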

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
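
/*
 * Illustrative: read_mapping_page() returns an uptodate (but unlocked) page,
 * or an ERR_PTR() on allocation or I/O failure, so callers usually do:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... read the contents ...
 *	put_page(page);
 */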

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in units of PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the page's offset into its mapping, in units of PAGE_SIZE.
 * (TODO: hugetlb pages should have ->index in units of PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT;
	pgoff += READ_ONCE(vma->vm_pgoff);
	return pgoff;
}
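
/*
 * Worked example (illustrative, assuming 4KiB pages): for a vma with
 * vm_start == 0x7f0000000000 and vm_pgoff == 0x10 (i.e. the mapping starts
 * at file offset 0x10000), the address 0x7f0000003000 yields
 * (0x3000 >> PAGE_SHIFT) + 0x10 == 0x13, the index of the file page
 * backing that address.
 */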

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
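
/*
 * Illustrative usage (simplified sketch, not from this file): callers of
 * lock_page_killable() must check for a non-zero return and back out,
 * since a fatal signal arrived instead of the lock being taken:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		put_page(page);
 *		return error;	// typically -EINTR
 *	}
 */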

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern void wake_up_page_bit(struct page *page, int bit_nr);

static inline void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated reference count, so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
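
/*
 * Illustrative pattern (not from this file): code that is about to rewrite,
 * truncate or invalidate a page usually locks it and then waits for any
 * writeback already in flight, so that neither the old nor the new I/O can
 * observe a half-updated page:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	// ... the page is now quiescent: modify or invalidate it ...
 *	unlock_page(page);
 */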

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
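
/*
 * Illustrative sketch of why the fault-in helpers exist (loosely based on
 * generic_perform_write(); details simplified): the buffered write path
 * pre-faults the source range before taking the page lock, because the
 * actual copy runs with page faults disabled to avoid deadlocking on a
 * page it already holds locked:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;	// source cannot be made resident
 *		break;
 *	}
 *	// ... write_begin() locks the destination pagecache page ...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */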
603
Nick Piggin529ae9a2008-08-02 12:01:03 +0200604int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
605 pgoff_t index, gfp_t gfp_mask);
606int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
607 pgoff_t index, gfp_t gfp_mask);
Minchan Kim97cecb52011-03-22 16:30:53 -0700608extern void delete_from_page_cache(struct page *page);
Johannes Weiner62cccb82016-03-15 14:57:22 -0700609extern void __delete_from_page_cache(struct page *page, void *shadow);
Miklos Szeredief6a3c62011-03-22 16:30:52 -0700610int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
Nick Piggin529ae9a2008-08-02 12:01:03 +0200611
612/*
613 * Like add_to_page_cache_locked, but used to add newly allocated pages:
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -0800614 * the page is new, so we can just run __SetPageLocked() against it.
Nick Piggin529ae9a2008-08-02 12:01:03 +0200615 */
616static inline int add_to_page_cache(struct page *page,
617 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
618{
619 int error;
620
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -0800621 __SetPageLocked(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +0200622 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
623 if (unlikely(error))
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -0800624 __ClearPageLocked(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +0200625 return error;
626}
627
Fabian Frederickb57c2cb2015-05-24 17:19:41 +0200628static inline unsigned long dir_pages(struct inode *inode)
629{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300630 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
631 PAGE_SHIFT;
Fabian Frederickb57c2cb2015-05-24 17:19:41 +0200632}
633
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634#endif /* _LINUX_PAGEMAP_H */