#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/page-flags.h>
#include <linux/bitops.h>

#include <asm/page.h>

/*
 * Cache flushing routines.  These can be very expensive, so try to
 * avoid them whenever possible.
 */

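/*
 * Most of these are no-ops: the ia64 data caches are coherent and
 * physically tagged, so there is no virtual-aliasing problem to clean up
 * after.  The one case software must handle explicitly is i-cache/d-cache
 * coherence once instructions have been written into a page; that is what
 * flush_icache_range() and the PG_arch_1 logic below are for.
 */
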
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

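/*
 * PG_arch_1 serves the ia64 mm code as a "this page is clean in the
 * i-cache" marker.  flush_dcache_page() only clears that bit; the actual
 * i-cache flush is deferred until the mm code next sees the bit clear
 * (typically when the page is mapped executable) and calls
 * flush_icache_range() on it.
 */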
#define flush_dcache_page(page)			\
do {						\
	clear_bit(PG_arch_1, &(page)->flags);	\
} while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)					\
do {												\
	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
	flush_icache_range(_addr, _addr + (len));						\
} while (0)

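/*
 * copy_to_user_page() is used when the kernel writes into another task's
 * pages (ptrace, access_process_vm() and friends).  The data may be
 * instructions, so the i-cache for the affected range must be made
 * coherent after the copy.  Reads need no maintenance, hence
 * copy_from_user_page() is a plain memcpy().
 */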
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
do {								\
	memcpy(dst, src, len);					\
	flush_icache_user_range(vma, page, vaddr, len);		\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)

#endif /* _ASM_IA64_CACHEFLUSH_H */
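
For context, a minimal sketch of the pattern in which generic kernel code uses copy_to_user_page(), loosely modelled on the access_process_vm() write path. The function name and the simplified error handling are illustrative only; it assumes <linux/mm.h> and <linux/highmem.h> for kmap()/kunmap() and set_page_dirty_lock().

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Hypothetical helper (illustrative sketch, not part of this header):
 * patch `len' bytes at user address `uaddr', which is backed by `page'
 * inside `vma'.
 */
static int example_poke_user_text(struct vm_area_struct *vma,
				  struct page *page,
				  unsigned long uaddr,
				  const void *insns, int len)
{
	/* kernel mapping of the target page */
	char *kaddr = kmap(page);

	/*
	 * Copy the bytes and, via flush_icache_user_range(), make the
	 * i-cache coherent in case they are instructions.
	 */
	copy_to_user_page(vma, page, uaddr,
			  kaddr + (uaddr & ~PAGE_MASK), insns, len);

	kunmap(page);
	set_page_dirty_lock(page);
	return len;
}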