#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
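
/*
 * Usage sketch for the vmap flush helpers, e.g. a driver handing a
 * vmap()ed buffer to a device ("buf", "len" and start_io() below are
 * illustrative, not real kernel API):
 *
 *	flush_kernel_vmap_range(buf, len);	   write back CPU-dirty lines
 *	start_io(buf, len);			   device reads/writes the buffer
 *	invalidate_kernel_vmap_range(buf, len);	   drop stale cache lines
 *						   before the CPU reads results
 */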

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

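/* Translate an address returned by kmap() back to its struct page. */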
struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}
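
/*
 * Minimal sketch of the sleeping kmap API ("data" and "len" are
 * illustrative):
 *
 *	void *vaddr = kmap(page);	   may sleep, so no atomic context
 *	memcpy(vaddr, data, len);
 *	kunmap(page);			   takes the page, not the address
 */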

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

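/*
 * Each CPU keeps a small stack of atomic kmap slots; __kmap_atomic_idx
 * tracks the nesting depth on the current CPU (e.g. an interrupt doing
 * kmap_atomic() while a mapping is already live on that CPU).
 */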
DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
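
/*
 * Typical atomic-mapping pattern ("dst" is illustrative):
 *
 *	void *vaddr = kmap_atomic(page);	   no sleeping until unmapped
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);			   pass the address, not the page
 */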

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
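
/*
 * Sketch of a typical caller in an anonymous page fault path (names and
 * labels are illustrative, not a real kernel snippet):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 */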

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
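
/*
 * Example: a filesystem zeroing the bytes past EOF in a file's last page
 * ("offset" is the first byte to clear; illustrative):
 *
 *	zero_user_segment(page, offset, PAGE_SIZE);
 */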

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */