/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"

#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
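
/*
 * The L2 cache type register (read below with "mrc p15, 1, ..., c0, c0, 1")
 * encodes the way size in bits [11:8] as 8KB << n; with 32-byte lines,
 * CACHE_SET_SIZE() derives the number of sets per way from the way size.
 * xsc3_l2_present() treats a non-zero size/associativity field in the same
 * register as evidence that an L2 cache is actually fitted.
 */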

static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

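/* Clean (write back) a single L2 cache line by its virtual address. */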
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

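/* Invalidate a single L2 cache line by its virtual address, discarding its contents. */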
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

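/*
 * Invalidate the whole L2 cache by iterating over every set and way.
 * The set/way operand packs the way index into bits [31:29] (eight ways)
 * and the set index starting at bit 5, matching the 32-byte line size.
 */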
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

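/*
 * The L2 maintenance operations above take virtual addresses, but with
 * CONFIG_HIGHMEM not every physical page has a permanent kernel mapping.
 * l2_map_va() below installs a temporary fixmap mapping when needed, and
 * interrupts are disabled while such a mapping is live so that it cannot
 * be repurposed underneath us.
 */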
#ifdef CONFIG_HIGHMEM
#define l2_map_save_flags(x)		raw_local_save_flags(x)
#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
#else
#define l2_map_save_flags(x)		((x) = 0)
#define l2_map_restore_flags(x)		((void)(x))
#endif

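/*
 * Return a virtual address for the physical address 'pa', reusing the
 * previous mapping when 'pa' falls in the same page as 'prev_va'.
 */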
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
				      unsigned long flags)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.  We also enable interrupts for a
		 * short while and disable them again to protect this
		 * mapping.
		 */
		unsigned long idx;
		raw_local_irq_restore(flags);
		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		raw_local_irq_restore(flags | PSR_I_BIT);
		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
		local_flush_tlb_kernel_page(va);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}

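/*
 * Invalidate the L2 lines covering [start, end).  Partial lines at either
 * end of the range are cleaned before being invalidated so that dirty data
 * outside the requested range is not thrown away.
 */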
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_map_restore_flags(flags);

	dsb();
}

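/*
 * Write back the L2 lines covering [start, end) without invalidating them.
 */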
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}

/*
 * Flush (clean and invalidate) the entire L2 cache in one pass using
 * set/way operations, which is much cheaper than iterating by address.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

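/*
 * Clean and invalidate the L2 lines covering [start, end); a full-range
 * request (0 to -1) falls back to the set/way based flush-all.
 */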
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}

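/*
 * Probe for an XScale3 L2 cache at boot: enable it via the CR_L2 bit of
 * the control register if it is not already on, then hook the range
 * operations into the generic outer_cache interface.
 */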
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (!(get_cr() & CR_L2)) {
		pr_info("XScale3 L2 cache enabled.\n");
		adjust_cr(CR_L2, CR_L2);
		xsc3_l2_inv_all();
	}

	outer_cache.inv_range = xsc3_l2_inv_range;
	outer_cache.clean_range = xsc3_l2_clean_range;
	outer_cache.flush_range = xsc3_l2_flush_range;

	return 0;
}
core_initcall(xsc3_l2_init);