#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
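
/*
 * Example (illustrative, not part of this header): a writeback
 * completion path that hits an I/O error typically records it on the
 * mapping so that a later fsync()/msync() can report it:
 *
 *	if (err)
 *		mapping_set_error(page->mapping, err);
 *	end_page_writeback(page);
 *
 * The AS_EIO/AS_ENOSPC bit saved here is later tested and cleared by
 * the sync code (see filemap_fdatawait() and friends).
 */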

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
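
/*
 * Example (illustrative sketch): a driver or filesystem that must not
 * recurse into the FS during its own pagecache allocations can
 * restrict the mapping's allocation mode at setup time, roughly:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 *
 * As noted above this is non-atomic, so it is only safe before the
 * mapping is in active use.
 */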

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
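
/*
 * Worked example: with 4K pages (PAGE_CACHE_SHIFT == 12),
 * PAGE_CACHE_ALIGN(0x1234) == 0x2000, while PAGE_CACHE_ALIGN(0x2000)
 * stays 0x2000 - the macro rounds up to the next page-cache boundary
 * and leaves already-aligned values unchanged.
 */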

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as was
 * used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if not, goto 1)
 *
 * The remove side that cares about stability of _count (e.g. reclaim) has the
 * following pattern (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees an elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees a zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed and then the exact
 * same page is inserted at the same position in the pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which the locks were granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
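
/*
 * Example (illustrative sketch, modelled on find_get_page): a
 * lookup-side user of the protocol above retries until it holds a
 * stable reference to a page that is still in the pagecache:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		// Has the page been removed (or replaced) meanwhile?
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *								offset))) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */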

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
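
/*
 * Example (sketch of the remove side, modelled on reclaim's
 * __remove_mapping): with mapping->tree_lock held, the expected
 * refcount (here 2: one for the pagecache, one held by the caller) is
 * frozen to 0 before the page leaves the pagecache, so a concurrent
 * speculative get either bumps the count first (and the freeze fails)
 * or finds the page already gone:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;	// someone else holds a reference
 *	__remove_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	// the page is now ours to free
 */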

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
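
/*
 * Example (illustrative): a filesystem that needs page 'n' of an
 * inode's data, read in through its own ->readpage, can do:
 *
 *	page = read_mapping_page(inode->i_mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	kaddr = kmap(page);
 *	// ... use the data ...
 *	kunmap(page);
 *	page_cache_release(page);
 *
 * On success the page comes back uptodate with an elevated refcount,
 * but not locked.
 */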

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		ClearPageLocked(page);
	return error;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
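
/*
 * Worked example: with 4K pages, a vma with vm_start == 0x40000000
 * and vm_pgoff == 0x100, the address 0x40003000 lies 3 pages into the
 * vma, so linear_page_index() returns 0x103: the pagecache index of
 * the page backing that address. (The final shift is a no-op while
 * PAGE_CACHE_SHIFT == PAGE_SHIFT.)
 */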

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
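
/*
 * Example (illustrative): the usual pattern is to lock the page, then
 * re-check that it is still attached to the expected mapping - it may
 * have been truncated while we slept on the lock:
 *
 *	lock_page(page);
 *	if (page->mapping != mapping) {		// truncated under us
 *		unlock_page(page);
 *		goto retry;
 *	}
 *	// ... operate on the page ...
 *	unlock_page(page);
 */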

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated page refcount, so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
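
/*
 * Example (illustrative sketch, modelled on the generic buffered write
 * path): the caller faults the source buffer in *before* taking the
 * pagecache page lock, because faulting while holding the page lock
 * could deadlock on that very page. The copy itself is then done with
 * pagefaults disabled, and a short copy triggers a retry (kaddr below
 * would come from kmap_atomic(page); names are hypothetical):
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = __grab_cache_page(mapping, index);	// returns it locked
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		goto retry_with_shorter_copy;
 */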

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

#endif /* _LINUX_PAGEMAP_H */