#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

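/*
 * Architectures with aliasing (e.g. VIVT) data caches define
 * ARCH_HAS_FLUSH_ANON_PAGE and supply a real flush_anon_page() in
 * <asm/cacheflush.h>; everyone else gets the no-op stub below, since a
 * physically indexed cache needs no maintenance when the kernel touches
 * an anonymous page through its kernel mapping.
 */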
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

DECLARE_PER_CPU(int, __kmap_atomic_idx);

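/*
 * kmap_atomic() mappings are stack based: each CPU keeps a counter of
 * in-use fixmap slots in __kmap_atomic_idx.  _push() hands out the next
 * free slot, _pop() releases the most recently taken one, so atomic
 * mappings must be released in strict LIFO order.
 */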
static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);
#endif
	return idx;
}

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
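/*
 * With !CONFIG_HIGHMEM every page is permanently mapped, so kmap()
 * degenerates to page_address() and kunmap() has nothing to undo.
 * __kmap_atomic() still disables pagefaults so that code written for
 * the highmem case keeps its "no faults, no sleeping" semantics.
 */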
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
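
/*
 * Sketch of a typical caller during the transition (illustrative only):
 * both the legacy two-argument form and the new single-argument form
 * expand to __kmap_atomic(page), and passing the struct page * itself
 * to kunmap_atomic() now fails to build.
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);	// idx is ignored
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(kaddr);				// new form
 *	kunmap_atomic(page);				// BUILD_BUG_ON fires
 */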

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
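
/*
 * Illustrative caller (hypothetical, mirroring the anonymous-fault
 * path): a fault handler that knows the new page may later be migrated
 * would do
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */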

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
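
/*
 * Hedged example: a filesystem punching out the bytes beyond a short
 * write before marking the page up to date might use
 *
 *	zero_user_segment(page, offset + copied, PAGE_SIZE);
 *
 * where offset and copied are hypothetical names for the write position
 * and the amount actually copied.
 */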

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

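/*
 * Two pages are mapped at once, hence the two distinct slots KM_USER0
 * and KM_USER1.  The kunmap_atomic() calls below deliberately run in
 * the reverse order of the kmap_atomic() calls: the stack based scheme
 * requires atomic mappings to be strictly nested.
 */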
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */