/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);

	return buf;
}
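
/*
 * Representative boot-time print produced by the above, via arc_cache_init()
 * below (geometry is hypothetical; real values come from the BCRs):
 *
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 4way/set, 64B Line, VIPT aliasing
 *	SLC		: 128K, 128B Line
 */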
/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
}
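
/*
 * Worked example of the decode above, for a hypothetical raw BCR value
 * (little-endian bitfield layout, default 8K PAGE_SIZE assumed):
 *
 *	ARC_REG_IC_BCR = 0x00036303
 *	  ver      = 0x03 -> legacy encoding: fixed 2-way set assoc
 *	  config   = 0x3  -> passes the BUG_ON sanity check
 *	  sz       = 0x6  -> sz_k = 1 << (6 - 1) = 32K
 *	  line_len = 0x3  -> line = 8 << 3 = 64B
 *
 *	alias = 32K / 2 ways / 8K page = 2 > 1, i.e. this I-cache aliases
 */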

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size), i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were ignored anyway by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for passing the additional info was that no new reg was needed
 * in the CDU programming model.
 *
 * 17:13 represented the max num of bits passable; actual bits needed were
 * fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
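
/*
 * Worked example of the MMU v1/v2 "stuffing" (hypothetical 64K, 2-way
 * I-cache with 8K pages: way-size = 32K, so vaddr bits [14:13] pick among
 * the 4 possible aliases): with PAGE_SHIFT = 13, the line-loop below shifts
 * vaddr right by 13 and masks 5 bits, planting vaddr[17:13] into paddr[4:0],
 * which the CDU would otherwise ignore as the intra-line offset:
 *
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 */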

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
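
	/*
	 * Worked example of the floor/ceil above (hypothetical 64B lines):
	 * paddr = 0x8000_0123, sz = 0x80:
	 *   sz += 0x8000_0123 & 0x3F (= 0x23)  -> sz    = 0xA3
	 *   paddr &= ~0x3F                     -> paddr = 0x8000_0100
	 *   num_lines = DIV_ROUND_UP(0xA3, 64) = 3 lines cover the range
	 */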

	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop __cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop __cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop __cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif /* CONFIG_SMP */

#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
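
/*
 * Illustrative sketch of "congruent" (hypothetical geometry: 64K, 4-way
 * aliasing D-cache, 8K pages, 64B lines, so way-size = 16K): the cache set
 * index uses addr bits [13:6] while a page only fixes bits [12:0]; a
 * U-mapping and the K-mapping of the same page thus land in the same sets
 * only if bit 13 of the two vaddrs agrees, which is the test that
 * addr_not_cache_congruent() encapsulates.
 */
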
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle the scenario where the range straddles
	 *     2 virtual pages, hence the need for the loop.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpoint in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}
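
/*
 * Hypothetical caller sketch (simplified; the names are illustrative, not a
 * real API): after patching an instruction in module text, both caches are
 * re-synced so the core fetches the new opcode instead of a stale line:
 *
 *	*(u32 *)addr = new_insn;
 *	__sync_icache_dcache(paddr_of_addr, (unsigned long)addr, 4);
 *
 * where paddr_of_addr stands for whatever paddr the caller derives for
 * @addr, e.g. via vmalloc_to_pfn() as flush_icache_range() does above.
 */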

/* wrapper to compile-time eliminate the alignment checks in the flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping? */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync the former to the physical page
	 * so that the K-mapping in the copy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
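
/*
 * Hypothetical userspace usage sketch for a JIT (assumes the __NR_cacheflush
 * number from the ARC uapi headers; the arguments are currently ignored and
 * the whole cache is flushed regardless):
 *
 *	emit_code(buf, len);
 *	syscall(__NR_cacheflush, (uint32_t)buf, len, 0);
 *	((void (*)(void))buf)();
 */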

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}
}