/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	if (!is_isa_arcv2())
		return buf;

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
				IS_DISABLED_RUN(ioc_enable));

	return buf;
}
69
Vineet Gupta95d69762013-01-18 15:12:19 +053070/*
71 * Read the Cache Build Confuration Registers, Decode them and save into
72 * the cpuinfo structure for later use.
73 * No Validation done here, simply read/convert the BCRs
74 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c && ioc_enable)
		ioc_exists = 1;
}
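
/*
 * Worked example of the SLC geometry decode above, with illustrative
 * register values (not tied to any particular silicon): if SLC_CFG reads
 * back with sz = 1 and lsz = 1, the code computes
 *
 *	p_slc->sz_k     = 128 << 1 = 256	(i.e. a 256K SLC)
 *	p_slc->line_len = 64			(lsz != 0 selects 64B lines)
 *
 * and l2_line_sz = 64 is what slc_op() further below uses to pad the
 * region END address.
 */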

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
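
/*
 * The OP_* values are bit flags: OP_FLUSH_N_INV == OP_INV | OP_FLUSH, so the
 * helpers below can test "op & OP_INV" or "op & OP_FLUSH" and match both the
 * simple op and the combined flush-n-inv variant with a single check.
 */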

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface was that no new register was needed in the CDU programming model.
 *
 * [17:13] represented the max num of bits passable; the actual bits needed
 * were fewer, based on the num of aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced:
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
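
/*
 * A worked example of the MMU v1/v2 "stuffing" above, with made-up but
 * representative addresses (8k pages, so PAGE_SHIFT == 13):
 *
 *	vaddr = 0x7000_6000  ->  (vaddr >> 13) & 0x1F = 0x3 (vaddr bits 17:13)
 *	paddr = 0x8002_6000  ->  low 5 bits are the in-line offset, ignored
 *
 *	paddr |= 0x3         ->  0x8002_6003 written to IVIL/IVDL/FLDL
 *
 * The CDU thus receives the physical line address in the upper bits and the
 * alias-resolving vaddr bits in [4:0], which is exactly what
 * __cache_line_loop_v2() below computes per line.
 */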

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
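
/*
 * Worked example of the floor/ceil fixup shared by all the
 * __cache_line_loop_*() variants (illustrative numbers, assuming 64B L1
 * lines): a request of @paddr = 0x8000_1030, @sz = 0x30 straddles a line
 * boundary, so two lines must be operated on even though sz < 64:
 *
 *	sz    += paddr & ~CACHE_LINE_MASK   ->  0x30 + 0x30 = 0x60
 *	paddr &= CACHE_LINE_MASK            ->  0x8000_1000
 *	num_lines = DIV_ROUND_UP(0x60, 64)  ->  2
 *
 * For the full_page case both fixups are provably no-ops, which is why the
 * compile-time constant check lets them be optimized away entirely.
 */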

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif
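
/*
 * The compile-time dispatch above picks one loop variant for the D-cache
 * helpers based on CONFIG_ARC_MMU_VER. The I-cache path instead goes through
 * the _cache_line_loop_ic_fn pointer, which arc_cache_init() below points at
 * __cache_line_loop_v3 for an aliasing icache on ARCv2 (IVIL/PTAG pair), and
 * at the default __cache_line_loop otherwise.
 */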

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}
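
/*
 * Worked example of the region programming in slc_op() above (illustrative
 * values, assuming l2_line_sz = 64): for @paddr = 0x8000_0040, @sz = 0x20
 * the writes become
 *
 *	RGN_END   = 0x8000_0040 + 0x20 + 63 = 0x8000_009F
 *	RGN_START = 0x8000_0040
 *
 * The (l2_line_sz - 1) pad guarantees END != START even for a request
 * smaller than one SLC line; since hardware ignores the sub-line bits, the
 * overshoot at worst touches one extra line, which is safe for all of
 * flush/inv/flush-n-inv.
 */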

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making the I/D caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...).
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert the vmalloc addr to a PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
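
/*
 * Worked example of the vmalloc straddle loop above (8k pages, hypothetical
 * addresses): for kstart = 0x7000_1FF0, kend = 0x7000_2010 (tot_sz = 0x20)
 * the loop runs twice:
 *
 *	iter 1: off = 0x1FF0, sz = min(0x20, 0x2000 - 0x1FF0) = 0x10
 *	        -> sync 0x10 bytes at phy(0x7000_1FF0)
 *	iter 2: off = 0,      sz = min(0x10, 0x2000) = 0x10
 *	        -> sync 0x10 bytes at phy(0x7000_2000)
 *
 * Each iteration does its own vmalloc_to_pfn() lookup because the two
 * virtual pages may map to discontiguous physical pages.
 */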

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpoint in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    A kprobe on a loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile-time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
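
/*
 * Illustrative userspace usage of the syscall above (not part of this file;
 * assumes the ARC uapi headers export __NR_cacheflush): a JIT that has just
 * emitted code would invoke the generic syscall wrapper with arguments
 * matching the signature above. The current implementation ignores the
 * range and flags and simply flushes everything:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// buf/len describe the freshly written code; 0 for unused flags
 *	syscall(__NR_cacheflush, (uint32_t)buf, (uint32_t)len, 0);
 */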

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
			;
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}