#ifndef __ASM_CPU_SH2A_CACHEFLUSH_H
#define __ASM_CPU_SH2A_CACHEFLUSH_H

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *
 * Caches are indexed (effectively) by physical address on SH-2, so
 * most of these operations can be no-ops; only flush_icache_range()
 * is backed by a real implementation (see the declaration below, and
 * the usage sketch after the definitions).
 */
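/*
 * The no-op definitions below use the empty do { } while (0) idiom so
 * that each macro still behaves as a single statement, e.g. when used
 * in an unbraced if/else.
 */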
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
#define flush_cache_sigtramp(vaddr)		do { } while (0)
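
/*
 * Usage sketch (illustrative only, not part of this header): code
 * that writes instructions into memory, a signal trampoline for
 * example, must make the icache coherent with memory before jumping
 * to it. Assuming a hypothetical destination buffer 'dst' holding
 * 'len' bytes of instructions copied from 'insns':
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst,
 *			   (unsigned long)dst + len);
 *
 * Every other operation above expands to a no-op on this CPU, so the
 * icache flush is the only call that does real work here.
 */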

#define p3_cache_init()				do { } while (0)
#endif /* __ASM_CPU_SH2A_CACHEFLUSH_H */