/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_CACHEFLUSH_H
#define __ASM_AVR32_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

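/*
 * Operation codes for the AVR32 "cache" instruction, which acts on the
 * cacheline selected by its memory operand.  The per-line helpers below
 * pass them through the "n" (immediate constant) asm constraint.
 */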
#define CACHE_OP_ICACHE_INVALIDATE	0x01
#define CACHE_OP_DCACHE_INVALIDATE	0x0b
#define CACHE_OP_DCACHE_CLEAN		0x0c
#define CACHE_OP_DCACHE_CLEAN_INVAL	0x0d

/*
 * Invalidate any cacheline containing virtual address vaddr without
 * writing anything back to memory.
 *
 * Note that this function may corrupt unrelated data structures when
 * applied to buffers that are not cacheline aligned at both ends.
 */
static inline void invalidate_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
		     : "memory");
}

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory.
 */
static inline void clean_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
		     : "memory");
}

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory and then invalidate it.
 */
static inline void flush_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
		     : "memory");
}

/*
 * Invalidate any instruction cacheline containing virtual address
 * vaddr.
 */
static inline void invalidate_icache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
		     : "memory");
}

/*
 * Apply the above operations to every cacheline touched by the
 * specified virtual address range.
 */
void invalidate_dcache_region(void *start, size_t len);
void clean_dcache_region(void *start, size_t len);
void flush_dcache_region(void *start, size_t len);
void invalidate_icache_region(void *start, size_t len);
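
/*
 * The region helpers above are implemented out of line in arch code.
 * The sketch below is purely illustrative (hypothetical __sketch_ name,
 * assuming L1_CACHE_BYTES is the D-cache line size and is visible via
 * the includes above): walk the range one line at a time, and write
 * back rather than simply drop any line the buffer only partially
 * covers, per the warning above invalidate_dcache_line().
 */
static inline void __sketch_invalidate_dcache_region(void *start, size_t len)
{
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;
	unsigned long line;

	for (line = begin & ~(unsigned long)(L1_CACHE_BYTES - 1);
	     line < end; line += L1_CACHE_BYTES) {
		/* A partially covered line may also hold unrelated data. */
		if (line < begin || line + L1_CACHE_BYTES > end)
			flush_dcache_line((void *)line);
		else
			invalidate_dcache_line((void *)line);
	}
}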

/*
 * Make sure any pending writes are completed before continuing.
 */
#define flush_write_buffer() asm volatile("sync 0" : : : "memory")

/*
 * The following functions are called when a virtual mapping changes.
 * We do not need to flush anything in this case.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/*
 * I think we need to implement this one to be able to reliably
 * execute pages from RAMDISK. However, if we implement the
 * flush_dcache_*() functions, it might not be needed anymore.
 *
 * #define flush_icache_page(vma, page)		do { } while (0)
 */
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

/*
 * These are (I think) related to D-cache aliasing. We might need to
 * do something here, but only for certain configurations. No such
 * configurations exist at this time.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(page)		do { } while (0)
#define flush_dcache_mmap_unlock(page)		do { } while (0)

/*
 * These are for I/D cache coherency. In this case, we do need to
 * flush in all configurations.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
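
/*
 * For reference only, not the real implementation (flush_icache_range()
 * lives out of line in arch code): the usual I/D coherency sequence
 * writes modified data back through the D-cache, then invalidates the
 * corresponding I-cache lines so new instructions are refetched from
 * memory.  A minimal sketch, assuming L1_CACHE_BYTES is the line size
 * of both caches, under a hypothetical __sketch_ name:
 */
static inline void __sketch_sync_icache_dcache(unsigned long start,
					       unsigned long end)
{
	unsigned long line;

	for (line = start & ~(unsigned long)(L1_CACHE_BYTES - 1);
	     line < end; line += L1_CACHE_BYTES) {
		clean_dcache_line((void *)line);
		invalidate_icache_line((void *)line);
	}
	flush_write_buffer();	/* drain pending writes before execution */
}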
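
/*
 * copy_to_user_page() is implemented out of line: it may write into a
 * user page that is mapped executable (e.g. ptrace inserting a
 * breakpoint), so after the copy it has to make the new data visible
 * to the I-cache.  copy_from_user_page() only reads from the page, so
 * a plain memcpy() is sufficient.
 */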
extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len);

static inline void copy_from_user_page(struct vm_area_struct *vma,
		struct page *page, unsigned long vaddr, void *dst,
		const void *src, unsigned long len)
{
	memcpy(dst, src, len);
}

#endif /* __ASM_AVR32_CACHEFLUSH_H */