/*
 * arch/sh/mm/cache-sh2a.c
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */
8
9#include <linux/init.h>
10#include <linux/mm.h>
11
12#include <asm/cache.h>
13#include <asm/addrspace.h>
14#include <asm/processor.h>
15#include <asm/cacheflush.h>
16#include <asm/io.h>
17
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_OCACHE_PAGES	32
#define MAX_ICACHE_PAGES	32

Phil Edworthy1ae911c2012-02-21 08:29:57 +000026#ifdef CONFIG_CACHE_WRITEBACK
Phil Edworthyc1537b42012-01-09 16:08:47 +000027static void sh2a_flush_oc_line(unsigned long v, int way)
28{
29 unsigned long addr = (v & 0x000007f0) | (way << 11);
30 unsigned long data;
31
32 data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
33 if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
34 data &= ~SH_CACHE_UPDATED;
35 __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
36 }
37}
Phil Edworthy1ae911c2012-02-21 08:29:57 +000038#endif
Phil Edworthyc1537b42012-01-09 16:08:47 +000039
40static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
41{
42 /* Set associative bit to hit all ways */
43 unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
44 __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
45}
46
47/*
48 * Write back the dirty D-caches, but not invalidate them.
49 */
Paul Mundta58e1a22009-08-15 12:38:29 +090050static void sh2a__flush_wback_region(void *start, int size)
Yoshinori Satocce2d452008-08-04 16:33:47 +090051{
Phil Edworthyc1537b42012-01-09 16:08:47 +000052#ifdef CONFIG_CACHE_WRITEBACK
Yoshinori Satocce2d452008-08-04 16:33:47 +090053 unsigned long v;
54 unsigned long begin, end;
55 unsigned long flags;
Phil Edworthyc1537b42012-01-09 16:08:47 +000056 int nr_ways;
Yoshinori Satocce2d452008-08-04 16:33:47 +090057
58 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
59 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
60 & ~(L1_CACHE_BYTES-1);
Phil Edworthyc1537b42012-01-09 16:08:47 +000061 nr_ways = current_cpu_data.dcache.ways;
Yoshinori Satocce2d452008-08-04 16:33:47 +090062
63 local_irq_save(flags);
64 jump_to_uncached();
65
Phil Edworthyc1537b42012-01-09 16:08:47 +000066 /* If there are too many pages then flush the entire cache */
67 if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
68 begin = CACHE_OC_ADDRESS_ARRAY;
69 end = begin + (nr_ways * current_cpu_data.dcache.way_size);
70
71 for (v = begin; v < end; v += L1_CACHE_BYTES) {
72 unsigned long data = __raw_readl(v);
73 if (data & SH_CACHE_UPDATED)
74 __raw_writel(data & ~SH_CACHE_UPDATED, v);
75 }
76 } else {
Yoshinori Satocce2d452008-08-04 16:33:47 +090077 int way;
Phil Edworthyc1537b42012-01-09 16:08:47 +000078 for (way = 0; way < nr_ways; way++) {
79 for (v = begin; v < end; v += L1_CACHE_BYTES)
80 sh2a_flush_oc_line(v, way);
Yoshinori Satocce2d452008-08-04 16:33:47 +090081 }
82 }
83
84 back_to_cached();
85 local_irq_restore(flags);
Phil Edworthyc1537b42012-01-09 16:08:47 +000086#endif
Yoshinori Satocce2d452008-08-04 16:33:47 +090087}
88
Phil Edworthyc1537b42012-01-09 16:08:47 +000089/*
90 * Write back the dirty D-caches and invalidate them.
91 */
Paul Mundta58e1a22009-08-15 12:38:29 +090092static void sh2a__flush_purge_region(void *start, int size)
Yoshinori Satocce2d452008-08-04 16:33:47 +090093{
94 unsigned long v;
95 unsigned long begin, end;
96 unsigned long flags;
97
98 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
99 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
100 & ~(L1_CACHE_BYTES-1);
101
102 local_irq_save(flags);
103 jump_to_uncached();
104
105 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
Phil Edworthyc1537b42012-01-09 16:08:47 +0000106#ifdef CONFIG_CACHE_WRITEBACK
107 int way;
108 int nr_ways = current_cpu_data.dcache.ways;
109 for (way = 0; way < nr_ways; way++)
110 sh2a_flush_oc_line(v, way);
111#endif
112 sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900113 }
Phil Edworthyc1537b42012-01-09 16:08:47 +0000114
Yoshinori Satocce2d452008-08-04 16:33:47 +0900115 back_to_cached();
116 local_irq_restore(flags);
117}
118
Phil Edworthyc1537b42012-01-09 16:08:47 +0000119/*
120 * Invalidate the D-caches, but no write back please
121 */
Paul Mundta58e1a22009-08-15 12:38:29 +0900122static void sh2a__flush_invalidate_region(void *start, int size)
Yoshinori Satocce2d452008-08-04 16:33:47 +0900123{
124 unsigned long v;
125 unsigned long begin, end;
126 unsigned long flags;
127
128 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
129 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
130 & ~(L1_CACHE_BYTES-1);
Phil Edworthyc1537b42012-01-09 16:08:47 +0000131
Yoshinori Satocce2d452008-08-04 16:33:47 +0900132 local_irq_save(flags);
133 jump_to_uncached();
134
Phil Edworthyc1537b42012-01-09 16:08:47 +0000135 /* If there are too many pages then just blow the cache */
136 if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
Geert Uytterhoevena5f6ea22014-03-03 15:38:33 -0800137 __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
138 SH_CCR);
Phil Edworthyc1537b42012-01-09 16:08:47 +0000139 } else {
140 for (v = begin; v < end; v += L1_CACHE_BYTES)
141 sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900142 }
Phil Edworthyc1537b42012-01-09 16:08:47 +0000143
Yoshinori Satocce2d452008-08-04 16:33:47 +0900144 back_to_cached();
145 local_irq_restore(flags);
146}
147
Phil Edworthyc1537b42012-01-09 16:08:47 +0000148/*
149 * Write back the range of D-cache, and purge the I-cache.
150 */
Paul Mundtf26b2a52009-08-21 17:23:14 +0900151static void sh2a_flush_icache_range(void *args)
Yoshinori Satocce2d452008-08-04 16:33:47 +0900152{
Paul Mundtf26b2a52009-08-21 17:23:14 +0900153 struct flusher_data *data = args;
154 unsigned long start, end;
Yoshinori Satocce2d452008-08-04 16:33:47 +0900155 unsigned long v;
Paul Mundt983f4c52009-09-01 21:12:55 +0900156 unsigned long flags;
Yoshinori Satocce2d452008-08-04 16:33:47 +0900157
Paul Mundtf26b2a52009-08-21 17:23:14 +0900158 start = data->addr1 & ~(L1_CACHE_BYTES-1);
159 end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900160
Phil Edworthyc1537b42012-01-09 16:08:47 +0000161#ifdef CONFIG_CACHE_WRITEBACK
162 sh2a__flush_wback_region((void *)start, end-start);
163#endif
164
Paul Mundt983f4c52009-09-01 21:12:55 +0900165 local_irq_save(flags);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900166 jump_to_uncached();
167
Phil Edworthyc1537b42012-01-09 16:08:47 +0000168 /* I-Cache invalidate */
169 /* If there are too many pages then just blow the cache */
170 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
Geert Uytterhoevena5f6ea22014-03-03 15:38:33 -0800171 __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
172 SH_CCR);
Phil Edworthyc1537b42012-01-09 16:08:47 +0000173 } else {
174 for (v = start; v < end; v += L1_CACHE_BYTES)
175 sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900176 }
177
178 back_to_cached();
Paul Mundt983f4c52009-09-01 21:12:55 +0900179 local_irq_restore(flags);
Yoshinori Satocce2d452008-08-04 16:33:47 +0900180}
Paul Mundta58e1a22009-08-15 12:38:29 +0900181
182void __init sh2a_cache_init(void)
183{
Paul Mundtf26b2a52009-08-21 17:23:14 +0900184 local_flush_icache_range = sh2a_flush_icache_range;
Paul Mundta58e1a22009-08-15 12:38:29 +0900185
186 __flush_wback_region = sh2a__flush_wback_region;
187 __flush_purge_region = sh2a__flush_purge_region;
188 __flush_invalidate_region = sh2a__flush_invalidate_region;
189}