#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
void kmap_atomic_flush_unused(void);
#else
static inline void kmap_atomic_flush_unused(void) { }
#endif

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while (0)
#define kmap_atomic_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
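
/*
 * Usage sketch (illustrative, not part of the original header):
 * kmap() may sleep and is therefore only valid in process context,
 * while kmap_atomic() disables pagefaults (and preemption) and must
 * be paired with kunmap_atomic() without sleeping in between. "p",
 * "data" and "len" below are placeholders, not defined here:
 *
 *	char *p = kmap(page);		(process context, may sleep)
 *	memcpy(p, data, len);
 *	kunmap(page);			(takes the page, not the address)
 */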

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
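
/*
 * Illustrative sketch (a simplified, assumed pattern, not a definitive
 * implementation) of how an architecture's kmap_atomic() can consume
 * the per-CPU index stack above to pick a fixmap slot:
 *
 *	int type = kmap_atomic_idx_push();
 *	int idx = type + KM_TYPE_NR * smp_processor_id();
 *	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *	return (void *)vaddr;
 *
 * The matching kunmap_atomic() clears the PTE and calls
 * kmap_atomic_idx_pop(), so mappings must be released in LIFO order.
 */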

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
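
/*
 * Example (illustrative) of the mistake the BUILD_BUG_ON() above
 * catches at compile time:
 *
 *	void *vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);		(build error: struct page *)
 *	kunmap_atomic(vaddr);		(correct: the mapped address)
 */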

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
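
/*
 * Example (illustrative): a filesystem zeroing the unwritten tail of a
 * page after a short write. "from" and "copied" are placeholder
 * variables, not defined in this header:
 *
 *	zero_user_segment(page, from + copied, PAGE_SIZE);
 */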

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	/* unmap in reverse (LIFO) order, matching the atomic index stack */
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */