/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * The maximum number of pages supported for ranged dcache flushing.
 * Anything exceeding this simply flushes the dcache in its entirety.
 */
#define MAX_OCACHE_PAGES	32
#define MAX_ICACHE_PAGES	32

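/*
 * For scale (illustrative arithmetic only, assuming the default 4 KiB
 * PAGE_SIZE): 32 pages caps the ranged operations at 128 KiB; anything
 * larger takes the whole-cache paths below instead.
 */
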
static void sh2a_flush_oc_line(unsigned long v, int way)
{
        unsigned long addr = (v & 0x000007f0) | (way << 11);
        unsigned long data;

        data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
        if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
                data &= ~SH_CACHE_UPDATED;
                __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
        }
}

static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
{
        /* Set associative bit to hit all ways */
        unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
        __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
}

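/*
 * Note on the two helpers above (a summary based on the code and the
 * SH-2A cache address-array interface; see the hardware manual for the
 * authoritative bit layout): both go through the memory-mapped address
 * arrays, with bits [10:4] of the address selecting a line within a
 * way. sh2a_flush_oc_line() addresses one way explicitly (way << 11)
 * and rewrites the entry with SH_CACHE_UPDATED cleared, which causes a
 * dirty line to be written back; sh2a_invalidate_line() instead sets
 * SH_CACHE_ASSOC so the hardware checks all ways and invalidates a
 * matching line.
 */
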
/*
 * Write back the dirty D-caches, but do not invalidate them.
 * (When the cache is configured write-through or off,
 * CONFIG_CACHE_WRITEBACK is not set and this becomes a no-op: there is
 * never any dirty data to write back.)
 */
static void sh2a__flush_wback_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
        unsigned long v;
        unsigned long begin, end;
        unsigned long flags;
        int nr_ways;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        nr_ways = current_cpu_data.dcache.ways;

        local_irq_save(flags);
        jump_to_uncached();

        /* If there are too many pages then flush the entire cache */
        if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
                begin = CACHE_OC_ADDRESS_ARRAY;
                end = begin + (nr_ways * current_cpu_data.dcache.way_size);

                for (v = begin; v < end; v += L1_CACHE_BYTES) {
                        unsigned long data = __raw_readl(v);
                        if (data & SH_CACHE_UPDATED)
                                __raw_writel(data & ~SH_CACHE_UPDATED, v);
                }
        } else {
                int way;
                for (way = 0; way < nr_ways; way++) {
                        for (v = begin; v < end; v += L1_CACHE_BYTES)
                                sh2a_flush_oc_line(v, way);
                }
        }

        back_to_cached();
        local_irq_restore(flags);
#endif
}

/*
 * Write back the dirty D-caches and invalidate them.
 */
static void sh2a__flush_purge_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;
        unsigned long flags;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);

        local_irq_save(flags);
        jump_to_uncached();

        for (v = begin; v < end; v += L1_CACHE_BYTES) {
#ifdef CONFIG_CACHE_WRITEBACK
                int way;
                int nr_ways = current_cpu_data.dcache.ways;
                for (way = 0; way < nr_ways; way++)
                        sh2a_flush_oc_line(v, way);
#endif
                sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
        }

        back_to_cached();
        local_irq_restore(flags);
}

/*
 * Invalidate the D-caches, but do not write them back.
 */
static void sh2a__flush_invalidate_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;
        unsigned long flags;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);

        local_irq_save(flags);
        jump_to_uncached();

        /* If there are too many pages then just blow the cache */
        if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
                __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
        } else {
                for (v = begin; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
        }

        back_to_cached();
        local_irq_restore(flags);
}

/*
 * Write back the D-cache over the given range, and purge the I-cache.
 */
static void sh2a_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;
        unsigned long v;
        unsigned long flags;

        start = data->addr1 & ~(L1_CACHE_BYTES-1);
        end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

#ifdef CONFIG_CACHE_WRITEBACK
        sh2a__flush_wback_region((void *)start, end - start);
#endif

        local_irq_save(flags);
        jump_to_uncached();

        /* I-Cache invalidate */
        /* If there are too many pages then just blow the cache */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
        } else {
                for (v = start; v < end; v += L1_CACHE_BYTES)
                        sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
        }

        back_to_cached();
        local_irq_restore(flags);
}

void __init sh2a_cache_init(void)
{
        local_flush_icache_range  = sh2a_flush_icache_range;

        __flush_wback_region      = sh2a__flush_wback_region;
        __flush_purge_region      = sh2a__flush_purge_region;
        __flush_invalidate_region = sh2a__flush_invalidate_region;
}
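
/*
 * How these hooks get used (illustrative sketch, not part of this file;
 * based on the generic SH cache layer in arch/sh/mm/cache.c of the same
 * era, so details may differ between kernel versions): the generic code
 * packages an icache flush request into a struct flusher_data and runs
 * the per-CPU hook installed above, roughly like
 *
 *	void flush_icache_range(unsigned long start, unsigned long end)
 *	{
 *		struct flusher_data data;
 *
 *		data.vma   = NULL;
 *		data.addr1 = start;
 *		data.addr2 = end;
 *		cacheop_on_each_cpu(local_flush_icache_range, &data, 1);
 *	}
 *
 * while __flush_wback_region(), __flush_purge_region() and
 * __flush_invalidate_region() are called directly on virtual address
 * ranges, e.g. by the SH DMA mapping code.
 */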