/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
        int n = 0;

#define PR_CACHE(p, cfg, str)                                           \
        if (!(p)->ver)                                                  \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
        else                                                            \
                n += scnprintf(buf + n, len - n,                        \
                        str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",  \
                        (p)->sz_k, (p)->assoc, (p)->line_len,           \
                        (p)->vipt ? "VIPT" : "PIPT",                    \
                        (p)->alias ? " aliasing" : "",                  \
                        IS_ENABLED(cfg) ? "" : " (not used)");

        PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

        return buf;
}
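
/*
 * Illustrative output of the format string above (made-up values, not
 * from a real board):
 *
 *      I-Cache         : 32K, 2way/set, 64B Line, VIPT
 */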

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
        struct cpuinfo_arc_cache *p_ic, *p_dc;
        unsigned int cpu = smp_processor_id();
        struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
                unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
        } ibcr, dbcr;

        p_ic = &cpuinfo_arc700[cpu].icache;
        READ_BCR(ARC_REG_IC_BCR, ibcr);

        if (!ibcr.ver)
                goto dc_chk;

        BUG_ON(ibcr.config != 3);
        p_ic->assoc = 2;                /* Fixed to 2w set assoc */
        p_ic->line_len = 8 << ibcr.line_len;
        p_ic->sz_k = 1 << (ibcr.sz - 1);
        p_ic->ver = ibcr.ver;
        p_ic->vipt = 1;
        p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
        p_dc = &cpuinfo_arc700[cpu].dcache;
        READ_BCR(ARC_REG_DC_BCR, dbcr);

        if (!dbcr.ver)
                return;

        BUG_ON(dbcr.config != 2);
        p_dc->assoc = 4;                /* Fixed to 4w set assoc */
        p_dc->line_len = 16 << dbcr.line_len;
        p_dc->sz_k = 1 << (dbcr.sz - 1);
        p_dc->ver = dbcr.ver;
        p_dc->vipt = 1;
        p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}
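
/*
 * Worked example of the decode above (illustrative values, not a real
 * BCR dump): an I-cache BCR with sz = 6 and line_len = 2 decodes to
 * sz_k = 1 << (6 - 1) = 32K and line_len = 8 << 2 = 32B. With the fixed
 * 2-way assoc and 8K pages, way-size = 32K/2 = 16K > 8K, hence alias = 1:
 * two different vaddr pages can hold the same paddr line.
 */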

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV          0x1
#define OP_FLUSH        0x2
#define OP_FLUSH_N_INV  0x3
#define OP_INV_IC       0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this
 * "clumsy" interface for additional info was that no new reg was needed in
 * the CDU programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
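
/*
 * Numeric sketch of the MMU v1/v2 "stuffing" described above (matches the
 * CONFIG_ARC_MMU_VER <= 2 path in __cache_line_loop() below): for a 64K
 * 2-way I-cache with 8K pages, way-size is 32K, i.e. 4 possible aliases,
 * so vaddr bits [14:13] disambiguate the line. They ride into CDU via
 * paddr bits [1:0]:
 *
 *      paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 *
 * No information is lost since CDU line ops ignore the low 5 paddr bits
 * (the intra-line offset).
 */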

static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
                                     unsigned long sz, const int op)
{
        unsigned int aux_cmd, aux_tag;
        int num_lines;
        const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

        if (op == OP_INV_IC) {
                aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
                aux_tag = ARC_REG_IC_PTAG;
#endif
        } else {
                /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
                aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
                aux_tag = ARC_REG_DC_PTAG;
#endif
        }

        /* Ensure we properly floor/ceil the non-line aligned/sized requests
         * and have @paddr aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized requests since:
         *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!full_page_op) {
                sz += paddr & ~CACHE_LINE_MASK;
                paddr &= CACHE_LINE_MASK;
                vaddr &= CACHE_LINE_MASK;
        }

        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
        /* MMUv2 and before: paddr contains stuffed vaddr bits */
        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
        /* if V-P const for loop, PTAG can be written once outside loop */
        if (full_page_op)
                write_aux_reg(aux_tag, paddr);
#endif

        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
                /* MMUv3, cache ops require paddr separately */
                if (!full_page_op) {
                        write_aux_reg(aux_tag, paddr);
                        paddr += L1_CACHE_BYTES;
                }

                write_aux_reg(aux_cmd, vaddr);
                vaddr += L1_CACHE_BYTES;
#else
                write_aux_reg(aux_cmd, paddr);
                paddr += L1_CACHE_BYTES;
#endif
        }
}
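
/*
 * Rounding example for the floor/ceil logic above (hypothetical values,
 * 64B lines): paddr = 0x1007, sz = 0x20. The offset within the line (7)
 * is added to sz (-> 0x27) and paddr is floored to 0x1000, so
 * DIV_ROUND_UP(0x27, 64) = 1 line op covers the full 0x1007..0x1026 range.
 */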

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
        if (op == OP_FLUSH_N_INV) {
                /* Dcache provides 2 cmd: FLUSH or INV
                 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * So toggle INV sub-mode depending on op request and default
                 */
                const unsigned int ctl = ARC_REG_DC_CTRL;
                write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
        }
}

static inline void __after_dc_op(const int op)
{
        if (op & OP_FLUSH) {
                const unsigned int ctl = ARC_REG_DC_CTRL;
                unsigned int reg;

                /* flush / flush-n-inv both wait */
                while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
                        ;

                /* Switch back to default Invalidate mode */
                if (op == OP_FLUSH_N_INV)
                        write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
        }
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
        int aux;

        __before_dc_op(op);

        if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
                aux = ARC_REG_DC_IVDC;
        else
                aux = ARC_REG_DC_FLSH;

        write_aux_reg(aux, 0x1);

        __after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)       __dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz, const int op)
{
        unsigned long flags;

        local_irq_save(flags);

        __before_dc_op(op);

        __cache_line_loop(paddr, vaddr, sz, op);

        __after_dc_op(op);

        local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
        write_aux_reg(ARC_REG_IC_IVIC, 1);
        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
                          unsigned long sz)
{
        unsigned long flags;

        local_irq_save(flags);
        __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
        local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)    __ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
        unsigned long paddr, vaddr;
        int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
        struct ic_inv_args *ic_inv = info;

        __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
{
        struct ic_inv_args ic_inv = {
                .paddr = paddr,
                .vaddr = vaddr,
                .sz    = sz
        };

        on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif /* CONFIG_SMP */

#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
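
/*
 * Congruent here means @paddr and @vaddr select the same cache index,
 * i.e. they agree in the alias-determining page-color bits (the bits
 * between PAGE_SHIFT and the way-size) - the assumption behind the
 * addr_not_cache_congruent() checks below.
 */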
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        if (!cache_is_vipt_aliasing()) {
                clear_bit(PG_dc_clean, &page->flags);
                return;
        }

        /* don't handle anon pages here */
        mapping = page_mapping(page);
        if (!mapping)
                return;

        /*
         * pagecache page, file not yet mapped to userspace
         * Make a note that K-mapping is dirty
         */
        if (!mapping_mapped(mapping)) {
                clear_bit(PG_dc_clean, &page->flags);
        } else if (page_mapped(page)) {

                /* kernel reading from page with U-mapping */
                unsigned long paddr = (unsigned long)page_address(page);
                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

                if (addr_not_cache_congruent(paddr, vaddr))
                        __flush_dcache_page(paddr, vaddr);
        }
}
EXPORT_SYMBOL(flush_dcache_page);


void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
        unsigned int tot_sz;

        WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

        /* Shortcut for bigger flush ranges.
         * Here we don't care if this was kernel virtual or phy addr
         */
        tot_sz = kend - kstart;
        if (tot_sz > PAGE_SIZE) {
                flush_cache_all();
                return;
        }

        /* Case: Kernel Phy addr (0x8000_0000 onwards) */
        if (likely(kstart > PAGE_OFFSET)) {
                /*
                 * The 2nd arg despite being paddr will be used to index icache
                 * This is OK since no alternate virtual mappings will exist
                 * given the callers for this case: kprobe/kgdb in built-in
                 * kernel code only.
                 */
                __sync_icache_dcache(kstart, kstart, kend - kstart);
                return;
        }

        /*
         * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
         * (1) ARC Cache Maintenance ops only take Phy addr, hence special
         *     handling of kernel vaddr.
         *
         * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
         *     it still needs to handle a 2 page scenario, where the range
         *     straddles across 2 virtual pages and hence need for loop
         */
        while (tot_sz > 0) {
                unsigned int off, sz;
                unsigned long phy, pfn;

                off = kstart % PAGE_SIZE;
                pfn = vmalloc_to_pfn((void *)kstart);
                phy = (pfn << PAGE_SHIFT) + off;
                sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
                __sync_icache_dcache(phy, kstart, sz);
                kstart += sz;
                tot_sz -= sz;
        }
}
EXPORT_SYMBOL(flush_icache_range);
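
/*
 * Straddle example for the loop above (hypothetical module address, 8K
 * pages): kstart = 0x7000_1f00, tot_sz = 0x200. Pass 1 syncs
 * 0x7000_1f00..0x7000_1fff (off = 0x1f00, sz = 0x100); pass 2 re-does
 * vmalloc_to_pfn() for the next page and syncs 0x7000_2000..0x7000_20ff.
 */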

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine
 *    since a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
        __ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
        __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
        unsigned long flags;

        local_irq_save(flags);

        __ic_entire_inv();
        __dc_entire_op(OP_FLUSH_N_INV);

        local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
                      unsigned long pfn)
{
        unsigned int paddr = pfn << PAGE_SHIFT;

        u_vaddr &= PAGE_MASK;

        __flush_dcache_page(paddr, u_vaddr);

        if (vma->vm_flags & VM_EXEC)
                __inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                     unsigned long u_vaddr)
{
        /* TBD: do we really need to clear the kernel mapping */
        __flush_dcache_page((unsigned long)page_address(page), u_vaddr);
        __flush_dcache_page((unsigned long)page_address(page),
                            (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long u_vaddr, struct vm_area_struct *vma)
{
        unsigned long kfrom = (unsigned long)page_address(from);
        unsigned long kto = (unsigned long)page_address(to);
        int clean_src_k_mappings = 0;

        /*
         * If SRC page was already mapped in userspace AND its U-mapping is
         * not congruent with K-mapping, sync former to physical page so that
         * K-mapping in memcpy below, sees the right data
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
         */
        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
                __flush_dcache_page(kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }

        copy_page((void *)kto, (void *)kfrom);

        /*
         * Mark DST page K-mapping as dirty for a later finalization by
         * update_mmu_cache(). Although the finalization could have been done
         * here as well (given that both vaddr/paddr are available).
         * But update_mmu_cache() already has code to do that for other
         * non copied user pages (e.g. read faults which wire in pagecache page
         * directly).
         */
        clear_bit(PG_dc_clean, &to->flags);

        /*
         * if SRC was already usermapped and non-congruent to kernel mapping
         * sync the kernel mapping back to physical page
         */
        if (clean_src_k_mappings) {
                __flush_dcache_page(kfrom, kfrom);
                set_bit(PG_dc_clean, &from->flags);
        } else {
                clear_bit(PG_dc_clean, &from->flags);
        }
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
        clear_page(to);
        clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
        /* TBD: optimize this */
        flush_cache_all();
        return 0;
}
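
/*
 * Hypothetical userspace usage sketch (not part of this file): a JIT that
 * just emitted instructions into @buf would sync I/D caches via
 *
 *      syscall(__NR_cacheflush, (uint32_t)buf, len, 0);
 *
 * before jumping to the generated code. The arguments are presently
 * ignored since the implementation simply flushes everything.
 */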

void arc_cache_init(void)
{
        unsigned int __maybe_unused cpu = smp_processor_id();
        char str[256];

        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

                if (!ic->ver)
                        panic("cache support enabled but non-existent cache\n");

                if (ic->line_len != L1_CACHE_BYTES)
                        panic("ICache line [%d] != kernel Config [%d]",
                              ic->line_len, L1_CACHE_BYTES);

                if (ic->ver != CONFIG_ARC_MMU_VER)
                        panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
                              ic->ver, CONFIG_ARC_MMU_VER);
        }

        if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
                struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
                int handled;

                if (!dc->ver)
                        panic("cache support enabled but non-existent cache\n");

                if (dc->line_len != L1_CACHE_BYTES)
                        panic("DCache line [%d] != kernel Config [%d]",
                              dc->line_len, L1_CACHE_BYTES);

                /* check for D-Cache aliasing */
                handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

                if (dc->alias && !handled)
                        panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
                else if (!dc->alias && handled)
                        panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
        }
}