/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define IS_USED_RUN(v)		((v) ? "" : "(disabled) ")
#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
				IS_USED_RUN(ioc_enable));

	return buf;
}

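/*
 * Illustrative boot-log output from the above (example values only):
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 4way/set, 64B Line, VIPT aliasing
 *	SLC		: 512K, 128B Line
 */
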
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (!is_isa_arcv2())
		return;

	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c && ioc_enable)
		ioc_exists = 1;
}

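/*
 * Worked decode example (illustrative numbers only): an I$ BCR with
 * ver=3, config=3, sz=6, line_len=2 decodes above to a 2-way, 32K
 * (1 << (6 - 1)) cache with 32B (8 << 2) lines; with 8K pages that is
 * 32K/2/8K = 2 colors per way, so p_ic->alias = 1.
 */
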
/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for the additional info was that no new register was needed in
 * the CDU programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers, IC_PTAG and DC_PTAG, were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

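/*
 * Worked example of the MMU v1/v2 bit stuffing (illustrative values):
 * with 8K pages (PAGE_SHIFT = 13), a 64K 2-way I$ has 4 aliases, so
 * vaddr bits [14:13] select the color (per the comment above). For
 * vaddr = 0x50006000 and line-aligned paddr = 0x80302040, the loop
 * below computes
 *	paddr | ((vaddr >> PAGE_SHIFT) & 0x1F) = 0x80302040 | 0x3 = 0x80302043
 * i.e. the needed vaddr bits ride in the low paddr bits that CDU line
 * ops otherwise ignore.
 */
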
static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

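/*
 * Worked example of the region programming above (illustrative values,
 * assuming l2_line_sz = 64): slc_op(0x80000040, 0x20, OP_FLUSH) writes
 * RGN_END = 0x80000040 + 0x20 + 63 = 0x8000009F, then RGN_START =
 * 0x80000040 (the START write triggers the operation), and spins on
 * SLC_CTRL_BUSY until the flush completes.
 */
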
/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

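/*
 * Note on the deferral above: for a pagecache page written by the kernel
 * before any user mmap exists, flush_dcache_page() only clears
 * PG_dc_clean; the actual writeback is finalized later in
 * update_mmu_cache() when the first user mapping is wired in.
 */
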
/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

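/*
 * Illustrative sketch (not part of this file's build): how a driver-side
 * DMA path might use the exports above. The function and buffer names are
 * hypothetical; per __dc_line_op_k(), kernel linear addresses index the
 * cache directly.
 */
#if 0
static void example_dma_to_device(void *buf, unsigned long len)
{
	/* write back dirty lines so the device reads up-to-date memory;
	 * arc_cache_init() has already picked the l1/slc/ioc variant
	 */
	dma_cache_wback((unsigned long)buf, len);
}
#endif
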
/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages, hence the need for a loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

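/*
 * Illustrative usage (hypothetical caller): after patching code at a
 * module (vmalloc) address, e.g. when installing a kprobe, one would do
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + sz);
 * and the vmalloc_to_pfn() loop above resolves each straddled page to
 * its phy addr.
 */
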
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

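/*
 * Illustrative userspace usage (hypothetical, syscall number name
 * assumed): a JIT would invoke this after emitting code, e.g.
 *	syscall(__NR_cacheflush, code_buf, code_sz, 0);
 * currently all arguments are ignored and the entire cache is flushed.
 */
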
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}