/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in
 * its entirety.
 */
#define MAX_OCACHE_PAGES	32
#define MAX_ICACHE_PAGES	32

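/*
 * Write back (without invalidating) a single dcache line, if it
 * currently holds the address 'v' in the given way.
 *
 * The line-index bits of the address select the entry in the
 * operand-cache address array, with the way number placed above them;
 * reading the array returns the stored tag plus state bits. If the tag
 * matches, writing the entry back with the UPDATED (dirty) bit cleared
 * makes the hardware write the line out to memory. (This summarises
 * the SH-2A address-array behaviour the code below relies on.)
 */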
static void sh2a_flush_oc_line(unsigned long v, int way)
{
	unsigned long addr = (v & 0x000007f0) | (way << 11);
	unsigned long data;

	data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
	if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
		data &= ~SH_CACHE_UPDATED;
		__raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
	}
}

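/*
 * Invalidate the line holding address 'v' in the given address array
 * (instruction or operand cache). With the associative bit set in the
 * array offset, the hardware compares the tag carried in the written
 * data against every way and clears the valid bit of the matching
 * entry, so no explicit per-way loop is needed.
 */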
static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
	/* Set associative bit to hit all ways */
	unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;

	/* The written data must carry the tag of 'v' for the compare */
	__raw_writel(v & CACHE_PHYSADDR_MASK, cache_addr | addr);
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * When the cache is configured write-through there is never anything
 * dirty to write back, so the whole body compiles away.
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;
	int nr_ways;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	nr_ways = current_cpu_data.dcache.ways;

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then flush the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		begin = CACHE_OC_ADDRESS_ARRAY;
		end = begin + (nr_ways * current_cpu_data.dcache.way_size);

		for (v = begin; v < end; v += L1_CACHE_BYTES) {
			unsigned long data = __raw_readl(v);
			if (data & SH_CACHE_UPDATED)
				__raw_writel(data & ~SH_CACHE_UPDATED, v);
		}
	} else {
		int way;
		for (way = 0; way < nr_ways; way++) {
			for (v = begin; v < end; v += L1_CACHE_BYTES)
				sh2a_flush_oc_line(v, way);
		}
	}

	back_to_cached();
	local_irq_restore(flags);
#endif
}

/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = begin; v < end; v += L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
		/*
		 * Write each way of the line back first; in write-through
		 * mode nothing is dirty and the invalidate alone suffices.
		 */
		int way;
		int nr_ways = current_cpu_data.dcache.ways;
		for (way = 0; way < nr_ways; way++)
			sh2a_flush_oc_line(v, way);
#endif
		sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Invalidate the D-caches, without writing anything back.
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	/* If there are too many pages then just blow away the entire cache */
	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
	} else {
		for (v = begin; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back the D-cache range, then purge the corresponding I-cache
 * lines.
 */
static void sh2a_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long v;
	unsigned long flags;

	start = data->addr1 & ~(L1_CACHE_BYTES-1);
	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

#ifdef CONFIG_CACHE_WRITEBACK
	/* Push any dirty data out first so the I-cache refills see it */
	sh2a__flush_wback_region((void *)start, end - start);
#endif

	local_irq_save(flags);
	jump_to_uncached();

	/*
	 * I-cache invalidate: if there are too many pages then just
	 * blow away the entire cache.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
	} else {
		for (v = start; v < end; v += L1_CACHE_BYTES)
			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
	}

	back_to_cached();
	local_irq_restore(flags);
}

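/*
 * Install the SH-2A implementations into the generic SH cache
 * maintenance hooks; after this, flush_icache_range() and the
 * __flush_*_region() entry points resolve to the routines above.
 */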
void __init sh2a_cache_init(void)
{
	local_flush_icache_range = sh2a_flush_icache_range;

	__flush_wback_region = sh2a__flush_wback_region;
	__flush_purge_region = sh2a__flush_purge_region;
	__flush_invalidate_region = sh2a__flush_invalidate_region;
}