#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
void kmap_atomic_flush_unused(void);
int kmap_remove_unused_cpu(unsigned int cpu);
#else
static inline void kmap_atomic_flush_unused(void) { }
#endif

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)
#define kmap_atomic_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

#if !defined(CONFIG_HIGHMEM) || !defined(CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH)
static inline int kmap_remove_unused_cpu(unsigned int cpu) { return 0; }
#endif

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
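
/*
 * Illustrative only: a HIGHMEM architecture's kmap_atomic() typically
 * pairs the helpers above with a per-CPU fixmap window.  A rough sketch,
 * modelled on the x86_32 implementation:
 *
 *	void *kmap_atomic(struct page *page)
 *	{
 *		int type = kmap_atomic_idx_push();
 *		int idx = type + KM_TYPE_NR * smp_processor_id();
 *		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *
 *		(install a temporary PTE for page at vaddr)
 *		return (void *)vaddr;
 *	}
 *
 * __kunmap_atomic() then clears the PTE and calls kmap_atomic_idx_pop().
 */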

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));      \
	__kunmap_atomic(addr);                                  \
} while (0)
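
/*
 * Illustrative usage sketch (buf and len are placeholders): pass
 * kunmap_atomic() the address returned by kmap_atomic(), never the
 * struct page itself:
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memcpy(vaddr, buf, len);
 *	kunmap_atomic(vaddr);
 *
 * Calling kunmap_atomic(page) instead trips the BUILD_BUG_ON() above.
 */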

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
#ifndef CONFIG_CMA
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
					vaddr);
#endif
}

#ifdef CONFIG_CMA
static inline struct page *
alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
					vaddr);
}
#endif
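
/*
 * Illustrative only: the anonymous-fault path in mm/memory.c is the
 * typical caller, allocating a zeroed, movable page for the faulting
 * address, roughly:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 */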

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
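
/*
 * Illustrative only: filesystems use these helpers to clear the part of
 * a page they do not fill with data, e.g. zeroing a partial block tail
 * when truncating (offset and blocksize are placeholders):
 *
 *	zero_user(page, offset, blocksize - offset);
 */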

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */