/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
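
/*
 * Usage sketch (illustrative only; example_dma_to_device and
 * example_dma_from_device are hypothetical, not kernel API).  "Flush"
 * writes dirty lines back to memory, "invalidate" discards cached
 * lines.  A write-back cache therefore needs a flush before a device
 * reads a buffer, and an invalidate before the CPU reads data that a
 * device has written:
 *
 *	void example_dma_to_device(unsigned long buf, unsigned long len)
 *	{
 *		__flush_dcache_range(buf, len);		// make RAM current
 *	}
 *
 *	void example_dma_from_device(unsigned long buf, unsigned long len)
 *	{
 *		__invalidate_dcache_range(buf, len);	// drop stale lines
 *	}
 */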

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif
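
/*
 * Note: on a configuration without a write-back data cache memory is
 * always current, so a plain "flush" degenerates to a no-op and a
 * "flush and invalidate" degenerates to a plain invalidate, as the
 * #else branch above encodes.
 */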

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches, so there is nothing to do here
 * unless the caches are aliasing (a cache way larger than a page).
 *
 * Pages can get remapped. Because this might change the 'color' of a
 * page, we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
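
/*
 * For illustration (a sketch, not a definition from this header): a
 * page's color is the part of the virtual address that indexes the
 * cache above the page offset.  With, say, a 16 KiB way and 4 KiB
 * pages, bits [13:12] select the color:
 *
 *	color = (vaddr & (DCACHE_WAY_SIZE - 1) & PAGE_MASK) >> PAGE_SHIFT;
 *
 * Two virtual mappings of one physical page can hold separate, mutually
 * stale cache lines whenever their colors differ.
 */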

#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
		unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page  local_flush_cache_page
#endif
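
/*
 * On UP kernels the generic flush_* entry points are simply aliases of
 * their local_* counterparts.  On SMP they are out-of-line functions
 * that run the local variant on each CPU (see the xtensa SMP support
 * code for the implementations).
 */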

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)				do { } while (0)

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn)		do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
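
/*
 * Usage sketch (illustrative; example_patch_insn is hypothetical):
 * code that generates or patches instructions must push them out of
 * the data cache and drop the stale instruction-cache copies before
 * executing them:
 *
 *	void example_patch_insn(unsigned long insn_addr, u32 insn)
 *	{
 *		*(u32 *)insn_addr = insn;
 *		flush_icache_range(insn_addr, insn_addr + sizeof(insn));
 *	}
 */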

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

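/*
 * copy_{to,from}_user_page() back access_process_vm() and friends
 * (e.g. ptrace writing breakpoints into another task), so the "to"
 * direction must leave the instruction cache consistent as well.
 */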
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		__flush_dcache_range((unsigned long) dst, len);	\
		__invalidate_icache_range((unsigned long) dst, len); \
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif

/*
 * The 4 GiB address space is split into eight 512 MiB blocks for the
 * purpose of cache attributes; the constants below describe that
 * geometry (1 << 29 = 512 MiB, block index in address bits [31:29]).
 */
#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, cacheattr" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	/* CACHEATTR holds one 4-bit attribute per 512 MiB block;
	 * shift by 4 * block number to extract the one for addr. */
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	/* Reassemble a CACHEATTR-style value by reading the attribute
	 * of each of the eight 512 MiB blocks back from the TLB. */
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
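
/*
 * Worked example for the XCHAL_HAVE_CACHEATTR variant above
 * (hypothetical address): for addr = 0xd0000400 the block number is
 * 0xd0000400 >> 29 = 6, so the shift is 6 * 4 = 24 and the result is
 * 0xc0000000 with ((cacheattr >> 24) & 0xF) OR-ed into the low bits.
 */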

/*
 * Decide from a region's cache-attribute nibble whether a DMA source
 * must be written back and whether a DMA destination must be
 * invalidated (the thresholds follow the core's CA encoding of
 * uncached vs. cached regions).
 */
static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* number of lines touched: grow size by the
		 * misalignment within the first line, round up to a
		 * full line, and divide by the line size */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
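
/*
 * Worked example (assuming a 32-byte line): addr = 0x1003, size = 0x20
 * gives cnt = (0x20 + 3 + 31) / 32 = 2, i.e. the two lines at 0x1000
 * and 0x1020 that the 32-byte span actually touches.
 */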

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}
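
/*
 * Note the asymmetry above: the first and last lines may be shared
 * with neighbouring data, so they are written back and invalidated
 * (dhwbi), while the interior lines, which the buffer fully owns, are
 * only invalidated (dhi).
 */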

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
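
/*
 * Usage sketch (illustrative; example_dma_map is hypothetical): a DMA
 * path could combine the predicates and the unaligned helpers above,
 * skipping the cache work for regions that are mapped uncached:
 *
 *	void example_dma_map(u32 buf, u32 len, int to_device)
 *	{
 *		if (to_device && xtensa_need_flush_dma_source(buf))
 *			flush_dcache_unaligned(buf, len);
 *		else if (!to_device &&
 *			 xtensa_need_invalidate_dma_destination(buf))
 *			invalidate_dcache_unaligned(buf, len);
 *	}
 */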

#endif /* _XTENSA_CACHEFLUSH_H */