#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (error) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
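
/*
 * Example (illustrative sketch, not taken from this header): a
 * writeback completion path might record a failed write so that a
 * later fsync() can report it; 'err' is a hypothetical I/O status:
 *
 *	if (err)
 *		mapping_set_error(page->mapping, err);
 *	end_page_writeback(page);
 *
 * The AS_EIO or AS_ENOSPC bit is then consumed by whoever next checks
 * the mapping for errors, e.g. on the fsync() path.
 */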

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
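
/*
 * Example (sketch): a filesystem that must avoid recursing into itself
 * during reclaim can restrict an inode's page cache allocations at
 * inode-setup time, before the mapping is live:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 */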

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

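/*
 * Example (sketch): rounding byte quantities to page cache units,
 * e.g. when sizing a read; 'size' is a hypothetical byte count:
 *
 *	pgoff_t nr_pages = (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 *	unsigned long aligned_size = PAGE_CACHE_ALIGN(size);
 */
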
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
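
/*
 * Example (sketch): readahead-style code allocates a cache-cold page
 * and inserts it into the page cache at 'index', dropping the page on
 * failure, much as the generic read path does:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL))
 *		page_cache_release(page);
 */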
83
84typedef int filler_t(void *, struct page *);
85
86extern struct page * find_get_page(struct address_space *mapping,
Fengguang Wu57f6b962007-10-16 01:24:37 -070087 pgoff_t index);
Linus Torvalds1da177e2005-04-16 15:20:36 -070088extern struct page * find_lock_page(struct address_space *mapping,
Fengguang Wu57f6b962007-10-16 01:24:37 -070089 pgoff_t index);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090extern struct page * find_or_create_page(struct address_space *mapping,
Fengguang Wu57f6b962007-10-16 01:24:37 -070091 pgoff_t index, gfp_t gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -070092unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
93 unsigned int nr_pages, struct page **pages);
Jens Axboeebf43502006-04-27 08:46:01 +020094unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
95 unsigned int nr_pages, struct page **pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
97 int tag, unsigned int nr_pages, struct page **pages);
98
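/*
 * Example (sketch): find_get_page() returns the page with an elevated
 * reference count (or NULL), so the caller must drop it when done:
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the (unlocked, possibly not uptodate) page ...
 *		page_cache_release(page);
 *	}
 */
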
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns a locked page at the given index in the given cache, creating it
 * if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
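
/*
 * Example (sketch): the page comes back locked and referenced, so the
 * caller unwinds both once the page has been filled or modified:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (page) {
 *		... fill or modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */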

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
			pgoff_t index, filler_t *filler,
			void *data);
extern struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler,
			void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
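
/*
 * Example (sketch): read page 'n' of an inode's data through the
 * mapping's ->readpage and wait for it to become uptodate; errors
 * come back as an ERR_PTR(), not NULL:
 *
 *	struct page *page = read_mapping_page(mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page ...
 *	page_cache_release(page);
 */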

int add_to_page_cache(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return the byte offset into the filesystem object for a page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
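
/*
 * Example (sketch): these two convert in opposite directions; for a
 * hypothetical fault at 'address' in 'vma', and a page found there:
 *
 *	pgoff_t index = linear_page_index(vma, address);
 *	loff_t pos = page_offset(page);
 *
 * 'index' is the page cache index backing the address, and 'pos' is
 * the byte offset of 'page' within its file.
 */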

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * It doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}
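
/*
 * Example (sketch): the usual pairing around a page update, assuming
 * the page's inode is pinned (e.g. by an open file):
 *
 *	lock_page(page);
 *	... modify page contents, set_page_dirty(page), etc. ...
 *	unlock_page(page);
 */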

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
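
/*
 * Example (sketch): truncate-style code waits for writeback to finish
 * under the page lock before tearing the page down:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... remove the page from the mapping ...
 *	unlock_page(page);
 */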

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
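
/*
 * Example (sketch): the buffered-write pattern.  The source buffer is
 * faulted in before the destination page is locked, because taking a
 * page fault while copying under the page lock could deadlock on the
 * very page being written:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = __grab_cache_page(mapping, index);
 *	... copy from 'buf' with pagefaults disabled, e.g. under
 *	    kmap_atomic() ...
 */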

#endif /* _LINUX_PAGEMAP_H */