#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
void kmap_atomic_flush_unused(void);
#else
static inline void kmap_atomic_flush_unused(void) { }
#endif

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#define kmap_atomic_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
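
/*
 * Illustrative sketch only: a highmem architecture's kmap_atomic()
 * implementation typically pairs kmap_atomic_idx_push() with
 * kmap_atomic_idx_pop() to manage its per-CPU stack of fixmap slots,
 * along these lines (kmap_pte, kmap_prot and FIX_KMAP_BEGIN follow the
 * x86 highmem_32.c convention and are arch-specific assumptions here):
 *
 *	int type = kmap_atomic_idx_push();
 *	int idx = type + KM_TYPE_NR * smp_processor_id();
 *	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *	...
 *	kmap_atomic_idx_pop();	// in the matching __kunmap_atomic()
 */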

/*
 * NOTE:
 * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
 * We only keep them for backward compatibility; any use of them now
 * triggers a warning.
 */

#define PASTE(a, b) a ## b
#define PASTE2(a, b) PASTE(a, b)

#define NARG_(_2, _1, n, ...) n
#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
							enum km_type km)
{
	return kmap_atomic(page);
}

#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))

static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
							    enum km_type km)
{
	__kunmap_atomic(addr);
}

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should be passed the return value of kmap_atomic(), not
 * the page.
 */
#define kunmap_atomic_deprecated(addr, km)			\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic_deprecated(addr, km);			\
} while (0)

#define kunmap_atomic_withcheck(addr)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)

#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
/**** End of C pre-processor tricks for deprecated macros ****/
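
/*
 * Illustrative sketch only: with the dispatch above, a one-argument call
 * expands to kmap_atomic1() and reaches the real implementation, while the
 * old two-argument form expands to kmap_atomic2() and lands in the
 * __deprecated wrapper. Supported usage passes the mapped address back,
 * never the page:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr, buf, len);	// buf/len are placeholders
 *	kunmap_atomic(vaddr);		// address returned by kmap_atomic()
 *
 *	kmap_atomic(page, KM_USER0);	// still compiles, warns as deprecated
 */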

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
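
/*
 * Hypothetical sketch of the override hook documented above: an
 * architecture that needs its own zeroing strategy (e.g. a
 * cache-bypassing clear) would declare something like the following in
 * its own headers and supply the definition elsewhere:
 *
 *	#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 *	struct page *__alloc_zeroed_user_highpage(gfp_t movableflags,
 *						  struct vm_area_struct *vma,
 *						  unsigned long vaddr);
 */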

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
#ifndef CONFIG_CMA
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
					vaddr);
#endif
}
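
/*
 * Illustrative use (a sketch assuming a fault-handler context, in the
 * style of do_anonymous_page() in mm/memory.c): anonymous-page allocation
 * picks the movable variant because such pages can later be migrated or
 * reclaimed:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */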

#ifdef CONFIG_CMA
static inline struct page *
alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
					vaddr);
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
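
/*
 * Illustrative sketch only: zeroing the tail of a partially written page,
 * as a filesystem might after a short write (the names from/count are
 * placeholders, not part of this API):
 *
 *	if (from + count < PAGE_SIZE)
 *		zero_user_segment(page, from + count, PAGE_SIZE);
 */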

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */