/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>

#define CR_L2			(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

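/*
 * An L2 cache is treated as present when the relevant bits of the
 * L2 cache type register read below are non-zero.
 */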
static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

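/*
 * Clean (write back) a single L2 cache line by virtual address.
 */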
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

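/*
 * Invalidate a single L2 cache line by virtual address, without
 * writing dirty data back first.
 */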
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

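/*
 * Invalidate the entire L2 by walking every set/way combination.
 * The set count comes from the L2 cache type register; the set/way
 * operand packs the way into the top bits and the set index above
 * the cache line offset bits.
 */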
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

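/*
 * The range operations below receive physical addresses, but the
 * cache maintenance instructions take virtual addresses.  With
 * CONFIG_HIGHMEM the target page may not be permanently mapped, so a
 * temporary atomic mapping is set up and torn down as the range walk
 * moves from page to page.  A virtual address of -1 means "no mapping
 * currently held".
 */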
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
	if (va != -1)
		kunmap_atomic((void *)va);
#endif
}

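/*
 * Return a virtual address for the physical address 'pa', reusing the
 * atomic mapping behind 'prev_va' while the walk stays within the same
 * page.
 */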
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.
		 */
		l2_unmap_va(prev_va);
		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}

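/*
 * Invalidate the L2 cache lines covering the physical address range
 * [start, end).  Partial lines at either end are cleaned before being
 * invalidated so that neighbouring data is not lost; a (0, -1) range
 * invalidates the whole cache.
 */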
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_unmap_va(vaddr);

	dsb();
}

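/*
 * Clean (write back) the L2 cache lines covering the physical address
 * range [start, end).
 */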
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

/*
 * Optimized L2 flush-all: clean and invalidate every line using
 * set/way operations rather than walking an address range.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

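/*
 * Clean and invalidate the L2 cache lines covering the physical
 * address range [start, end).  A (0, -1) range flushes the whole
 * cache via the set/way path above.
 */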
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;  /* to force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_unmap_va(vaddr);

	dsb();
}

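/*
 * Hook the L2 maintenance routines into the outer_cache interface,
 * provided we are running on an XScale3 with the L2 enabled in the
 * control register.
 */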
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (get_cr() & CR_L2) {
		pr_info("XScale3 L2 cache enabled.\n");
		xsc3_l2_inv_all();

		outer_cache.inv_range = xsc3_l2_inv_range;
		outer_cache.clean_range = xsc3_l2_clean_range;
		outer_cache.flush_range = xsc3_l2_flush_range;
	}

	return 0;
}
core_initcall(xsc3_l2_init);