/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>

/*
 * An entry in pkmap_count is not a pure "count":
 *   0 means that the entry is not mapped, and has not been mapped
 *     since the last TLB flush - so it is usable.
 *   1 means that there are no users, but it has been mapped
 *     since the last TLB flush - so we can't use it.
 *   n means that there are (n-1) current users of it.
 */
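/*
 * As an illustration, a typical entry's lifetime under this scheme is
 * roughly:
 *
 *	pkmap_count[i] == 0	free; map_new_virtual() may claim the slot
 *	kmap_high()		maps it and leaves the count at 2
 *				(1 for "mapped since flush" + 1 user)
 *	kunmap_high()		drops the count back to 1 (no users, but
 *				still mapped since the last TLB flush)
 *	flush_all_zero_pkmaps()	clears the PTE and resets the count to 0
 */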
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid
 * otherwise useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

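/*
 * Flush and unmap every pkmap entry whose count has dropped to 1.
 * Note that both callers, map_new_virtual() and kmap_flush_unused(),
 * hold kmap_lock around this call, which is what makes the scan and
 * clearing of pkmap_count[] below safe without further locking.
 */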
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
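
/*
 * Typical usage is via the kmap()/kunmap() wrappers, which return
 * page_address() directly for lowmem pages and only reach kmap_high()
 * for real highmem pages - roughly:
 *
 *	char *vaddr = kmap(page);
 *	memcpy(vaddr + offset, buf, len);	(operate on the mapping)
 *	kunmap(page);
 */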

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no
 * mapping exists. If and only if a non-NULL address is returned, a
 * matching call to kunmap_high() is required.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
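
/*
 * An illustrative pattern for the above (a sketch): code that must not
 * block, e.g. architecture cache maintenance, can pin an existing
 * mapping and fall back to some other method when there is none:
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		(operate on the pinned mapping)
 *		kunmap_high(page);
 *	}
 */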
#endif

/**
 * kunmap_high - unmap a highmem page from memory
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif /* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
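
/*
 * With PA_HASH_ORDER == 7, page_slot() spreads pages over
 * 1 << 7 == 128 buckets, so concurrent lookups contend only on a
 * per-bucket spinlock rather than a single global lock.
 */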
| 325 | |
Randy Dunlap | 77f6078 | 2008-03-19 17:00:42 -0700 | [diff] [blame] | 326 | /** |
| 327 | * page_address - get the mapped virtual address of a page |
| 328 | * @page: &struct page to get the virtual address of |
| 329 | * |
| 330 | * Returns the page's virtual address. |
| 331 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 332 | void *page_address(struct page *page) |
| 333 | { |
| 334 | unsigned long flags; |
| 335 | void *ret; |
| 336 | struct page_address_slot *pas; |
| 337 | |
| 338 | if (!PageHighMem(page)) |
| 339 | return lowmem_page_address(page); |
| 340 | |
| 341 | pas = page_slot(page); |
| 342 | ret = NULL; |
| 343 | spin_lock_irqsave(&pas->lock, flags); |
| 344 | if (!list_empty(&pas->lh)) { |
| 345 | struct page_address_map *pam; |
| 346 | |
| 347 | list_for_each_entry(pam, &pas->lh, list) { |
| 348 | if (pam->page == page) { |
| 349 | ret = pam->virtual; |
| 350 | goto done; |
| 351 | } |
| 352 | } |
| 353 | } |
| 354 | done: |
| 355 | spin_unlock_irqrestore(&pas->lock, flags); |
| 356 | return ret; |
| 357 | } |
| 358 | |
| 359 | EXPORT_SYMBOL(page_address); |
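
/*
 * Illustrative caller pattern (a sketch): since page_address() returns
 * NULL for a highmem page with no current mapping, callers that need a
 * usable address must be prepared to create one:
 *
 *	void *vaddr = page_address(page);
 *	if (!vaddr)
 *		vaddr = kmap(page);	(and kunmap(page) when done)
 */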

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

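/*
 * The pool below holds LAST_PKMAP entries - one per pkmap slot - so as
 * long as each mapping created by map_new_virtual() consumes exactly
 * one page_address_map, set_page_address(..., non-NULL) can never find
 * the freelist empty (hence the BUG_ON above).
 */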
static struct page_address_map page_address_maps[LAST_PKMAP];

void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */