// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>


#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
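
/*
 * Illustrative lifecycle of one pkmap slot (annotation added for
 * clarity, not from the original source):
 *
 *	0 --kmap_high()--> 2	(map_new_virtual() sets the count to 1,
 *				 then kmap_high() adds the user reference)
 *	2 --kunmap_high()--> 1	(no users, but a stale TLB entry remains)
 *	1 --flush_all_zero_pkmaps()--> 0	(flushed, slot reusable)
 *
 * So a slot stuck at 1 is not in use; it is only waiting for a flush.
 */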
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following
 * family of helper functions in their asm/highmem.h to control the cache
 * color of the virtual addresses where physical memory pages are mapped
 * by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be
 * mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for mapping inside the PKMAP region for a page of
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if the page index inside the PKMAP region (pkmap_nr) of the
 * given color has wrapped around the PKMAP region end. When this happens,
 * an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by the freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
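
/*
 * Illustrative sketch (added annotation, not from the original source):
 * an architecture with a virtually-indexed, aliasing data cache could
 * override the defaults above in its asm/highmem.h. The two-color split
 * below is hypothetical:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;	// low PFN bit picks the color
 *	}
 *	#define get_pkmap_color get_pkmap_color
 *
 * With a matching get_next_pkmap_nr() that steps only through slots of
 * that color, a page is then only ever mapped at virtual addresses whose
 * cache color matches its physical address, avoiding aliasing.
 */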

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

unsigned int nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid the
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);
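
/*
 * Usage sketch (added annotation, not from the original source):
 * kmap_to_page() recovers the struct page behind any kernel virtual
 * address, whether it lies in the PKMAP window or in lowmem:
 *
 *	void *vaddr = kmap(page);		// high or low page
 *	BUG_ON(kmap_to_page(vaddr) != page);	// round-trips either way
 *	kunmap(page);
 */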

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps one of their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
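
/*
 * Usage sketch (added annotation, not from the original source):
 * kmap_high() is normally reached through the arch kmap() wrapper, and
 * every successful mapping must be balanced with kunmap():
 *
 *	static void zero_highpage_slowly(struct page *page)
 *	{
 *		void *vaddr = kmap(page);	// may sleep waiting for a slot
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);			// drops the pkmap_count reference
 *	}
 *
 * For short-lived mappings, or from atomic context, kmap_atomic() and
 * kunmap_atomic() avoid the pkmap pool and its kmap_lock entirely.
 */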

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif
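
/*
 * Illustrative sketch (added annotation; flush_kernel_alias() is a
 * hypothetical helper, not a real API): because kmap_high_get() never
 * blocks, a caller such as an arch DMA layer can use it from atomic
 * context to find and pin an existing kernel alias of the page:
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		flush_kernel_alias(vaddr, PAGE_SIZE);	// hypothetical
 *		kunmap_high(page);	// balance the pin taken above
 *	}
 */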

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif	/* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;		/* List of page_address_maps */
	spinlock_t lock;		/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);
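
/*
 * Usage sketch (added annotation, not from the original source): for a
 * highmem page, page_address() returns NULL unless the page is currently
 * kmapped, so callers that need a mapping must be ready to create one:
 *
 *	void *vaddr = page_address(page);
 *	if (!vaddr)
 *		vaddr = kmap(page);	// creates one; kunmap(page) later
 */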

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */