#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
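
/*
 * Hedged usage sketch (illustrative only, not part of this header): a
 * writeback completion path latching a failure so that a later fsync(2)
 * sees it.  The example_end_writeback name is hypothetical; page_mapping(),
 * mapping_set_error() and end_page_writeback() are the real interfaces:
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (err)
 *			mapping_set_error(mapping, err);
 *		end_page_writeback(page);
 *	}
 */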

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
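
/*
 * Example (illustrative): a caller that must not recurse into the
 * filesystem during allocation can constrain a full mask to what the
 * mapping permits:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *
 * If the mapping's own mask lacks __GFP_FS, the result lacks it too.
 */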

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned.  Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
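
/*
 * Hedged sketch of the lookup-side pattern (steps 1-3) described above,
 * loosely modelled on the pagecache lookup helpers; illustrative only,
 * with slot re-validation simplified to a second lookup and error
 * handling elided:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;	// page was being freed: retry
 *		if (page != radix_tree_lookup(&mapping->page_tree,
 *					      offset)) {	// step 3
 *			put_page(page);	// raced with removal: retry
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */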

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
		__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
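
/*
 * Hedged usage sketch (illustrative): pair the returned reference with
 * put_page() once the caller is done with the page:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		// ... read from the (unlocked) page ...
 *		put_page(page);
 *	}
 */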

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
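
/*
 * Hedged usage sketch (illustrative): create-or-find with the mapping's
 * own allocation mask, then drop the page lock and the reference:
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	// ... page is locked: fill or examine it ...
 *	unlock_page(page);
 *	put_page(page);
 */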

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
			pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
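
/*
 * Worked example (illustrative numbers): for a non-hugetlb vma with
 * vm_pgoff = 16, an address three pages past vma->vm_start yields
 * pgoff = ((3 * PAGE_SIZE) >> PAGE_SHIFT) + 16 = 19.
 */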

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased page reference count so that the
 * page won't go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
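
/*
 * Hedged usage sketch (illustrative): pre-fault the user buffer before
 * entering a region where taking a page fault would deadlock, e.g. while
 * the target pagecache page is locked in a buffered-write path:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	// ... lock the pagecache page and copy from buf without faulting ...
 */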

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
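
/*
 * Hedged usage sketch (illustrative): insert a freshly allocated page,
 * remembering that on success the page comes back locked and holds the
 * pagecache's reference:
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache(page, mapping, offset, gfp);
 *	if (error) {
 *		put_page(page);
 *		return error;
 *	}
 *	// ... initialize the page, then unlock_page(page) ...
 */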

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
							PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */