#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
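
/*
 * Illustrative sketch (not part of this header): a filesystem that must
 * not recurse into itself while allocating pagecache memory would clear
 * __GFP_FS when it sets up a new inode's mapping, e.g.:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * The mask stored here is what mapping_gfp_mask() later hands to the
 * page_cache_alloc*() helpers below.
 */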

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
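
/*
 * Worked example (assuming a 4K PAGE_CACHE_SIZE): PAGE_CACHE_ALIGN
 * rounds an address up to the next page-cache boundary, so
 * PAGE_CACHE_ALIGN(0x1001) == 0x2000, while an already-aligned
 * PAGE_CACHE_ALIGN(0x2000) stays 0x2000, since
 * (0x2000 + 0xfff) & ~0xfff == 0x2000.
 */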

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *page_cache_alloc(struct address_space *x);
extern struct page *page_cache_alloc_cold(struct address_space *x);
#else
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
#endif

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern __deprecated_for_modules struct page * find_trylock_page(
			struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
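
/*
 * Illustrative sketch (not part of this header): a typical caller
 * grabs the page, works on it while it is locked, then drops both
 * the lock and the reference:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (page) {
 *		... fill or modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */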

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy.  Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif
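
/*
 * Illustrative sketch (not part of this header): callers account one
 * page at a time, e.g. pagecache_acct(1) when a page is inserted into
 * the cache and pagecache_acct(-1) when it is removed.  On SMP the
 * per-cpu delta is only folded into nr_pagecache once its absolute
 * value crosses PAGECACHE_ACCT_THRESHOLD, so the global count is
 * deliberately approximate.
 */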

static inline unsigned long get_page_cache_size(void)
{
	int ret = atomic_read(&nr_pagecache);
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
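
/*
 * Worked example (assuming 4K pages): a page holding file offset
 * 0x3000 has page->index == 3, so page_offset() returns
 * (loff_t)3 << 12 == 0x3000.
 */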

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
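
/*
 * Worked example (assuming 4K pages, so PAGE_CACHE_SHIFT == PAGE_SHIFT
 * and the final shift is a no-op): for a vma starting at 0x40000000
 * with vm_pgoff == 2, address 0x40003000 lies 3 pages into the vma,
 * so linear_page_index() returns 3 + 2 == 5.
 */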

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
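
/*
 * Illustrative sketch (not part of this header): the read path uses
 * this to wait for I/O started by ->readpage() to finish, roughly:
 *
 *	page_cache_get(page);
 *	mapping->a_ops->readpage(file, page);
 *	wait_on_page_locked(page);
 *	if (!PageUptodate(page))
 *		... handle the read error ...
 */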

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
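
/*
 * Illustrative sketch (not part of this header): writeback of a page
 * is bracketed by the writeback bit, which end_page_writeback()
 * clears before waking waiters:
 *
 *	set_page_writeback(page);	writer marks the page
 *	... I/O in flight ...
 *	end_page_writeback(page);	I/O completion handler
 *
 * while e.g. an fsync path uses wait_on_page_writeback() to wait for
 * that completion.
 */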

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
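
/*
 * Illustrative sketch (not part of this header): generic write paths
 * fault the user buffer in *before* taking the page lock, because
 * copying from an unmapped user page while holding the lock could
 * deadlock (servicing the fault may need that very page):
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = grab_cache_page(mapping, index);
 *	... the copy from userspace is now unlikely to fault ...
 */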

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}

#endif /* _LINUX_PAGEMAP_H */