/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *   -flush_cache_dup_mm (fork)
 *   -likewise for flush_cache_mm (exit/execve)
 *   -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing shd not be based on assumption of 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Convert them into iteration-count based loops as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In I-cache flush routine we used to chk for aliasing for every line INV.
 *   Instead now we setup routines per cache geometry and invoke them
 *   via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *     =Some call sites had to work around by doing -1, -4 etc to end param
 *     =Some callers didn't care. This was especially bad in case of INV
 *      routines which would discard valid data (cause of the horrible ext2
 *      bug in ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc_to_phy.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. For e.g. a module
 *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
#define ARC_REG_IC_IVIC		0x10
#define ARC_REG_IC_CTRL		0x11
#define ARC_REG_IC_IVIL		0x19
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG		0x1E
#endif

/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	0x1

/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR		0x72	/* Build Config reg */
#define ARC_REG_DC_IVDC		0x47
#define ARC_REG_DC_CTRL		0x48
#define ARC_REG_DC_IVDL		0x4A
#define ARC_REG_DC_FLSH		0x4B
#define ARC_REG_DC_FLDL		0x4C
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG		0x5C
#endif

/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH	0x40
#define DC_CTRL_FLUSH_STATUS	0x100

char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)						\
{									\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len,	\
			enb ?  "" : "DISABLED (kernel-build)");		\
}

	PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
			"I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
			"D-Cache");

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	BUG_ON(ibcr.config != 3);
	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz = 0x200 << ibcr.sz;
	p_ic->ver = ibcr.ver;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	BUG_ON(dbcr.config != 2);
	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;
}

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, set up workarounds (different flush
 *    routines); aliasing D-cache configurations are not supported YET
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void arc_cache_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
	unsigned int dcache_does_alias, temp;
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!ic->ver)
		goto chk_dc;

#ifdef CONFIG_ARC_HAS_ICACHE
	/* 1. Confirm some of I-cache params which Linux assumes */
	if (ic->line_len != L1_CACHE_BYTES)
		panic("Cache H/W doesn't match kernel Config");

	if (ic->ver != CONFIG_ARC_MMU_VER)
		panic("Cache ver doesn't match MMU ver\n");
#endif

	/* Enable/disable I-Cache */
	temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
	temp &= ~IC_CTRL_CACHE_DISABLE;
#else
	temp |= IC_CTRL_CACHE_DISABLE;
#endif

	write_aux_reg(ARC_REG_IC_CTRL, temp);

chk_dc:
	if (!dc->ver)
		return;

#ifdef CONFIG_ARC_HAS_DCACHE
	if (dc->line_len != L1_CACHE_BYTES)
		panic("Cache H/W doesn't match kernel Config");

	/* check for D-Cache aliasing */
	dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;

	if (dcache_does_alias && !cache_is_vipt_aliasing())
		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	else if (!dcache_does_alias && cache_is_vipt_aliasing())
		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
#endif

	/* Set the default Invalidate Mode to "simply discard dirty lines"
	 * as this is more frequent than flush-before-invalidate.
	 * Of course we toggle this default behaviour when desired.
	 */
	temp = read_aux_reg(ARC_REG_DC_CTRL);
	temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
	/* Enable D-Cache: Clear Bit 0 */
	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
	/* Flush D cache */
	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
	/* Disable D cache */
	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif

	return;
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to cache line and @num_lines integral.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMUv3, cache ops require paddr separately */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
		;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned int tmp = tmp;	/* self-init quiets "maybe-uninitialized" */
	int aux;

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags, tmp = tmp;	/* self-init quiets warning */

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	__cache_line_loop(paddr, vaddr, sz, cacheop);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */


#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
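
/*
 * Worked example of the above (illustrative numbers only):
 * a 64K, 2-way I-cache has a 32K way, so with 8K pages a given line can live
 * at 32K/8K = 4 different indices and vaddr bits [14:13] pick the right one.
 * Under the MMUv1/v2 piggyback scheme, (vaddr >> PAGE_SHIFT) & 0x1F lands
 * those bits in paddr[1:0] (line-offset bits the CDU ignores anyway), which
 * is why killing every alias of a line means issuing the op with
 * paddr | {0x00, 0x01, 0x02, 0x03} - see the changelog at the top of this
 * file. With MMUv3, the index (vaddr) goes to IVIL/IVDL/FLDL and the tag
 * (paddr) to the new PTAG regs, so no bit stuffing is needed.
 */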

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

#else

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);


void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	local_irq_restore(flags);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	___flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = page_address(from);
	void *kto = page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
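/*
 * Sketch of a user-space invocation (hypothetical names; assumes the arch
 * specific __NR_cacheflush number exported via the uapi unistd header):
 *
 *	syscall(__NR_cacheflush, (uint32_t)jit_buf, jit_len, 0);
 *
 * @flags is currently unused and the whole cache is flushed regardless of
 * the range passed in (see the TBD below).
 */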
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}