/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 *	__invalidate_icache_all()
 *	__invalidate_icache_page(adr)
 *	__invalidate_dcache_page(adr)
 *	__invalidate_icache_range(from,size)
 *	__invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 *	__flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 *	__flush_invalidate_dcache_all()
 *	__flush_invalidate_dcache_page(adr)
 *	__flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 *	__flush_invalidate_dcache_page_alias(vaddr,paddr)
 *	__invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

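/*
 * Illustrative use only (not part of this header's API): a driver that
 * fills a buffer for device DMA on a writeback D-cache must write the
 * lines back before starting the transfer, and discard stale lines before
 * reading data the device produced.  'buf' and 'len' are hypothetical:
 *
 *	__flush_dcache_range((unsigned long) buf, len);
 *	... start DMA from buf ...
 *	__invalidate_dcache_range((unsigned long) buf, len);
 *	... now read the data the device wrote to buf ...
 */
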
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif

#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing (a cache way larger than the page size).
 *
 * Pages can get remapped. Because this might change the 'color' of a page
 * (the cache index bits above the page offset), we have to flush the cache
 * before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
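
/*
 * Worked example (numbers assumed, not from any particular configuration):
 * with a 16 KiB cache way and 4 KiB pages, virtual address bits [13:12]
 * index the cache but are not fixed by the physical page, so a physical
 * page has four possible colors; two mappings that differ in those bits
 * land in different cache lines and can alias.
 */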

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);

#else

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)

#define flush_cache_vmap(start,end)		do { } while (0)
#define flush_cache_vunmap(start,end)		do { } while (0)

#define flush_dcache_page(page)			do { } while (0)

#define flush_cache_page(vma,addr,pfn)		do { } while (0)
#define flush_cache_range(vma,start,end)	do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
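
/*
 * Illustrative use only: after storing instructions through the D-cache
 * (e.g. a loader or debugger patching code), the range must be made
 * visible to instruction fetch.  'code', 'image' and 'bytes' are
 * hypothetical:
 *
 *	memcpy(code, image, bytes);
 *	flush_icache_range((unsigned long) code,
 *			   (unsigned long) code + bytes);
 */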

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
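
/*
 * Derived from the constants above: 1 << 29 is a 512 MiB block, so the
 * 4 GiB address space splits into eight blocks, the block index is the
 * top three address bits, and the index mask is therefore 7 << 29.
 */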

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	/* CACHEATTR holds one 4-bit attribute per 512 MiB block; shift this
	 * block's nibble into bits 3..0 ((r >> 29) * 4 == r >> 27). */
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	/* Rebuild a CACHEATTR-style value by reading the attribute of each
	 * of the eight 512 MiB blocks, from the top of memory downwards, so
	 * block 0 ends up in the low nibble. */
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
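
/*
 * Example (values assumed for illustration): xtensa_get_cacheattr()
 * returning 0x22222224 would describe block 0 (the lowest 512 MiB) with
 * attribute 4 and blocks 1-7 with attribute 2, one nibble per block.
 */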

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	/* Attribute values of 4 and above are the cached writeback modes,
	 * where memory may be stale until the D-cache is written back. */
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	/* Attribute 2 is bypass (uncached); any other mode may hold stale
	 * lines that must be invalidated after the device writes memory. */
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
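
/*
 * Illustrative use only: a driver can combine the checks above with the
 * unaligned helpers below around a transfer.  'buf' and 'len' are
 * hypothetical:
 *
 *	if (xtensa_need_flush_dma_source((u32) buf))
 *		flush_dcache_unaligned((u32) buf, len);
 *	... run the DMA transfer ...
 *	if (xtensa_need_invalidate_dma_destination((u32) buf))
 *		invalidate_dcache_unaligned((u32) buf, len);
 */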

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* Lines touched: the offset into the first line plus the
		 * size, rounded up to whole cache lines; e.g. with 32-byte
		 * lines, offset 5 and size 64 give (64+5+31)/32 = 3 lines. */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		/* The first and last lines may be only partially covered by
		 * the buffer, so write them back before invalidating (dhwbi)
		 * rather than discard unrelated data with a plain dhi; only
		 * the interior lines are invalidated outright. */
		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */