#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

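/*
 * Each CPU keeps a stack of in-use atomic kmap slots; __kmap_atomic_idx
 * is the per-CPU stack pointer. Map operations push an index, unmap
 * operations pop one. The debug checks below catch stack overflow and
 * underflow, and use from hardirq context with interrupts still enabled.
 */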
DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);
#endif
	return idx;
}

#endif

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)
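
/*
 * Illustrative sketch (not part of this header): with the variadic
 * compatibility macro above, the legacy two-argument form and the new
 * one-argument form both expand to __kmap_atomic(page); the old KM_*
 * slot argument is simply ignored.
 *
 *	kaddr = kmap_atomic(page);		// new style
 *	kaddr = kmap_atomic(page, KM_USER0);	// legacy style, same effect
 */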

/*
 * Prevent people from trying to call kunmap_atomic() as if it were
 * kunmap(): kunmap_atomic() should get the return value of
 * kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
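
/*
 * Illustrative sketch (not part of this header): unmap with the mapped
 * address, never the struct page. Passing the page back now fails at
 * compile time via the BUILD_BUG_ON() above.
 *
 *	void *kaddr = kmap_atomic(page);
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(kaddr);	// correct: the address returned above
 *	kunmap_atomic(page);	// would trip BUILD_BUG_ON at build time
 */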

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
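
/*
 * Illustrative sketch (not part of this header, vma and address are
 * assumed fault-handler context): a caller that knows the page may
 * later migrate passes __GFP_MOVABLE; passing 0 yields an unmovable
 * allocation.
 *
 *	struct page *page;
 *
 *	page = __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */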

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows can
 * be migrated in the future using move_pages() or reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
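
/*
 * Illustrative sketch (not part of this header): this is the variant
 * anonymous-fault paths would use, since anonymous pages can always be
 * migrated or reclaimed.
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 */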

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

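/*
 * Zero two byte ranges within one page through a temporary atomic
 * mapping, then flush the data cache so the zeroes are visible before
 * the page reaches user space or disk.
 */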
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
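
/*
 * Illustrative sketch (not part of this header, new_size is an assumed
 * valid-data length within the page): a filesystem zeroing the tail of
 * a partially written page. zero_user() takes a length, while
 * zero_user_segment() takes an exclusive end offset.
 *
 *	zero_user(page, new_size, PAGE_SIZE - new_size);
 *	zero_user_segment(page, new_size, PAGE_SIZE);	// equivalent
 */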

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

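/*
 * Copy one whole page to another through temporary atomic mappings;
 * note the unmaps happen in reverse (stack) order of the maps.
 */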
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */