/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_CACHEFLUSH_H
#define __ASM_AVR32_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

#define CACHE_OP_ICACHE_INVALIDATE	0x01
#define CACHE_OP_DCACHE_INVALIDATE	0x0b
#define CACHE_OP_DCACHE_CLEAN		0x0c
#define CACHE_OP_DCACHE_CLEAN_INVAL	0x0d

/*
 * Invalidate any cacheline containing virtual address vaddr without
 * writing anything back to memory.
 *
 * Note that this function may corrupt unrelated data structures when
 * applied to buffers that are not cacheline-aligned at both ends.
 */
static inline void invalidate_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
		     : "memory");
}
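
/*
 * Illustrative sketch (not part of the original header): invalidating
 * a buffer line by line. EXAMPLE_CACHE_LINE_SIZE is a stand-in for the
 * CPU's real, configuration-dependent line size; the buffer is aligned
 * and padded to whole lines so that no unrelated data shares the
 * invalidated lines (see the warning above).
 */
#define EXAMPLE_CACHE_LINE_SIZE	32

static char example_rx_buf[256]
	__attribute__((aligned(EXAMPLE_CACHE_LINE_SIZE)));

static inline void example_invalidate_rx_buf(void)
{
	char *p;

	for (p = example_rx_buf;
	     p < example_rx_buf + sizeof(example_rx_buf);
	     p += EXAMPLE_CACHE_LINE_SIZE)
		invalidate_dcache_line(p);
}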

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory.
 */
static inline void clean_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
		     : "memory");
}

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory and then invalidate it.
 */
static inline void flush_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
		     : "memory");
}

/*
 * Invalidate any instruction cacheline containing virtual address
 * vaddr.
 */
static inline void invalidate_icache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
		     : "memory");
}

/*
 * Apply the above operations to every cache line touched by the
 * specified virtual address range.
 */
void invalidate_dcache_region(void *start, size_t len);
void clean_dcache_region(void *start, size_t len);
void flush_dcache_region(void *start, size_t len);
void invalidate_icache_region(void *start, size_t len);
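
/*
 * Illustrative sketch (not the kernel's actual implementation, which
 * lives in the AVR32 mm code): one plausible shape for
 * invalidate_dcache_region(), reusing EXAMPLE_CACHE_LINE_SIZE from the
 * example above. Partial lines at either end may hold unrelated dirty
 * data, so they are flushed (clean + invalidate) rather than
 * invalidated outright.
 */
static inline void example_invalidate_dcache_region(void *start, size_t len)
{
	unsigned long v = (unsigned long)start;
	unsigned long end = v + len;
	unsigned long mask = EXAMPLE_CACHE_LINE_SIZE - 1;

	if (v & mask) {
		/* Flush the partial line at the start of the range. */
		flush_dcache_line((void *)v);
		v = (v | mask) + 1;
	}
	if (end & mask) {
		/* Flush the partial line at the end of the range. */
		flush_dcache_line((void *)end);
		end &= ~mask;
	}

	/* Whole lines in between can be invalidated outright. */
	while (v < end) {
		invalidate_dcache_line((void *)v);
		v += EXAMPLE_CACHE_LINE_SIZE;
	}
}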

/*
 * Make sure any pending writes are completed before continuing.
 */
#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
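
/*
 * Illustrative sketch: a typical use of a D-cache clean plus write
 * buffer sync before handing a buffer to a device. The DMA start call
 * is hypothetical driver code, not something this header provides.
 */
static inline void example_dma_to_device(void *buf, size_t len)
{
	/* Push any dirty cachelines covering buf out to RAM... */
	clean_dcache_region(buf, len);
	/* ...and make sure the writes have actually completed. */
	flush_write_buffer();
	/* start_dma_to_device(buf, len);  -- hypothetical driver call */
}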

/*
 * The following functions are called when a virtual mapping changes.
 * We do not need to flush anything in this case.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/*
 * I think we need to implement this one to be able to reliably
 * execute pages from RAMDISK. However, if we implement the
 * flush_dcache_*() functions, it might not be needed anymore.
 *
 * #define flush_icache_page(vma, page)		do { } while (0)
 */
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

/*
 * These are (I think) related to D-cache aliasing. We might need to
 * do something here, but only for certain configurations. No such
 * configurations exist at this time.
 */
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(page)		do { } while (0)
#define flush_dcache_mmap_unlock(page)		do { } while (0)

/*
 * These are for I/D cache coherency. In this case, we do need to
 * flush in all configurations.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page,
				    unsigned long addr, int len);
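
/*
 * Illustrative sketch: flush_icache_range() is what a caller needs
 * after writing instructions to RAM (e.g. a loader copying code into
 * place) and before executing them, so the new instructions are
 * pushed out of the D-cache and stale ones dropped from the I-cache.
 */
static inline void example_install_code(void *dst, const void *src,
					size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}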

#define copy_to_user_page(vma, page, vaddr, dst, src, len) do {	\
	memcpy(dst, src, len);						\
	flush_icache_user_range(vma, page, vaddr, len);			\
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
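
/*
 * Illustrative sketch: copy_to_user_page() is typically invoked from
 * the generic access_process_vm() path, e.g. when a debugger pokes
 * breakpoint instructions into a traced task. 'kaddr' is assumed to
 * be a kernel mapping of 'page'; the macro copies the bytes and keeps
 * the I-cache coherent so the target task executes the updated code.
 */
static inline void example_poke_text(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *kaddr, const void *insn, int len)
{
	copy_to_user_page(vma, page, vaddr, kaddr, insn, len);
}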

#endif /* __ASM_AVR32_CACHEFLUSH_H */