#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
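
/*
 * Illustrative usage sketch (not part of this header): an atomic kmap
 * pins a page into a per-CPU slot, so the caller must not sleep between
 * kmap_atomic() and kunmap_atomic(), and nested mappings taken from
 * different slots (KM_USER0, KM_USER1) must be released in reverse
 * order. A hypothetical helper copying out of a page might look like:
 *
 *	static void copy_from_page(struct page *page, void *buf, size_t len)
 *	{
 *		void *kaddr = kmap_atomic(page, KM_USER0);
 *		memcpy(buf, kaddr, len);
 *		kunmap_atomic(kaddr, KM_USER0);
 *	}
 */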

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
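
/*
 * Illustrative sketch only (hypothetical, not taken from any particular
 * architecture): an arch whose caches need flushing after the clear
 * could define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE in its asm/page.h
 * and provide something along these lines:
 *
 *	#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 *	#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)	\
 *	({								\
 *		struct page *page = alloc_page_vma(GFP_HIGHUSER |	\
 *				__GFP_ZERO | movableflags, vma, vaddr);	\
 *		if (page)						\
 *			flush_dcache_page(page);			\
 *		page;							\
 *	})
 */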

/**
 * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * not be able to move in the future using move_pages() or reclaim. If it
 * is known that the page can move, use alloc_zeroed_user_highpage_movable().
 */
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(0, vma, vaddr);
}

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaim.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
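
/*
 * Illustrative usage (hypothetical caller): an anonymous page-fault
 * path would normally request a movable zeroed page, since nothing
 * prevents anonymous memory from being migrated later:
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */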

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Same but also flushes aliased cache contents to RAM.
 *
 * This must be a macro because KM_USER0 and friends aren't defined if
 * !CONFIG_HIGHMEM
 */
#define zero_user_page(page, offset, size, km_type)		\
	do {							\
		void *kaddr;					\
								\
		BUG_ON((offset) + (size) > PAGE_SIZE);		\
								\
		kaddr = kmap_atomic(page, km_type);		\
		memset((char *)kaddr + (offset), 0, (size));	\
		flush_dcache_page(page);			\
		kunmap_atomic(kaddr, (km_type));		\
	} while (0)
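
/*
 * Illustrative usage (hypothetical caller): a filesystem zeroing the
 * tail of the last, partially used page of a file, e.g. on truncate:
 *
 *	unsigned int offset = newsize & (PAGE_SIZE - 1);
 *
 *	if (offset)
 *		zero_user_page(page, offset, PAGE_SIZE - offset, KM_USER0);
 */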

/* Deprecated: callers should use zero_user_page() directly. */
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user_page(page, offset, size, KM_USER0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}

#endif
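
/*
 * Illustrative usage (hypothetical caller): a copy-on-write fault
 * handler duplicating the old page into a freshly allocated one for
 * the faulting address:
 *
 *	new_page = alloc_page_vma(GFP_HIGHUSER | __GFP_MOVABLE, vma, address);
 *	if (new_page)
 *		copy_user_highpage(new_page, old_page, address, vma);
 */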

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */