Catalin Marinas | f1a0c4a | 2012-03-05 11:49:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Cache maintenance |
| 3 | * |
| 4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. |
| 5 | * Copyright (C) 2012 ARM Ltd. |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #include <linux/linkage.h> |
| 21 | #include <linux/init.h> |
| 22 | #include <asm/assembler.h> |
| 23 | |
| 24 | #include "proc-macros.S" |
| 25 | |
/*
 * __flush_dcache_all()
 *
 * Flush (clean & invalidate) the whole D-cache, level by level, using
 * set/way maintenance (dc cisw) on every set/way of every data or
 * unified cache level up to the Level of Coherency reported by CLIDR_EL1.
 *
 * NOTE(review): set/way maintenance operates on the local CPU's caches
 * only — presumably callers guarantee no other master is dirtying these
 * lines concurrently; confirm against call sites.
 *
 * Corrupted registers: x0-x7, x9-x11
 */
__flush_dcache_all:
	dsb	sy				// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1			// read clidr
	and	x3, x0, #0x7000000		// extract loc from clidr (bits 26:24)
	lsr	x3, x3, #23			// x3 = LoC * 2, matching the level counter
						// in x10 which advances in steps of 2
	cbz	x3, finished			// if loc is 0, then no need to clean
	mov	x10, #0				// start clean at cache level 0
loop1:
	add	x2, x10, x10, lsr #1		// work out 3x current cache level
						// (CLIDR has a 3-bit Ctype field per level)
	lsr	x1, x0, x2			// extract cache type bits from clidr
	and	x1, x1, #7			// mask off the bits for current cache only
	cmp	x1, #2				// see what cache we have at this level
	b.lt	skip				// skip if no cache, or just i-cache
	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10			// select current cache level in csselr
	isb					// isb to sync the new cssr & csidr
	mrs	x1, ccsidr_el1			// read the new ccsidr
	restore_irqs x9
	and	x2, x1, #7			// extract the log2(line size) - 4 field
	add	x2, x2, #4			// add 4 (line length offset) -> set-index shift
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3		// find maximum number of the way size (ways - 1)
	clz	w5, w4				// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13		// extract max number of the index size (sets - 1)
loop2:
	mov	x9, x4				// create working copy of max way size
loop3:
	lsl	x6, x9, x5			// way number goes in the top bits
	orr	x11, x10, x6			// factor way and cache number into x11
	lsl	x6, x7, x2			// set index shifted by the line-size offset
	orr	x11, x11, x6			// factor index number into x11
	dc	cisw, x11			// clean & invalidate by set/way
	subs	x9, x9, #1			// decrement the way
	b.ge	loop3
	subs	x7, x7, #1			// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2			// increment cache number (level field is bits 3:1)
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0				// switch back to cache level 0
	msr	csselr_el1, x10			// select current cache level in csselr
	dsb	sy				// complete all maintenance before return
	isb
	ret
ENDPROC(__flush_dcache_all)
| 81 | |
/*
 * flush_cache_all()
 *
 * Flush the entire cache system. The data cache flush is now achieved
 * using atomic clean / invalidates working outwards from L1 cache. This
 * is done using Set/Way based cache maintenance instructions. The
 * instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(flush_cache_all)
	mov	x12, lr				// save lr in x12: __flush_dcache_all is
						// documented to corrupt x0-x7, x9-x11 only
	bl	__flush_dcache_all
	mov	x0, #0
	ic	ialluis				// I+BTB cache invalidate (all, inner shareable)
	ret	x12				// return via the saved link register
ENDPROC(flush_cache_all)
| 98 | |
/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * Cache operations that fault (user address unmapped) are ignored via
 * the USER() exception-table fixup, which branches to label 9 below.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3			// x2 = D-cache line size, x3 clobbered
	sub	x3, x2, #1			// x3 = line mask
	bic	x4, x0, x3			// x4 = start rounded down to a D line
1:
USER(9f, dc	cvau, x4	)		// clean D line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	sy				// D-side cleaning must complete before
						// the I-side invalidation below

	icache_line_size x2, x3			// x2 = I-cache line size (may differ from D)
	sub	x3, x2, #1
	bic	x4, x0, x3			// x4 = start rounded down to an I line
1:
USER(9f, ic	ivau, x4	)		// invalidate I line PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
9:						// ignore any faulting cache operation
	dsb	sy
	isb					// discard prefetched instructions
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
| 147 | |
/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that the data held in the page kaddr is written back to the
 * page in question.
 *
 * - kaddr - kernel address
 * - size - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3			// x2 = cache line size
	add	x1, x0, x1			// x1 = end = kaddr + size
	sub	x3, x2, #1			// x3 = line mask
	bic	x0, x0, x3			// round kaddr down to a line boundary
1:	dc	civac, x0			// clean & invalidate D line / unified line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// complete maintenance before return
	ret
ENDPROC(__flush_dcache_area)
Catalin Marinas | 7363590 | 2013-05-21 17:35:19 +0100 | [diff] [blame] | 169 | |
/*
 * __dma_inv_range(start, end)
 *
 * Invalidate the D-cache over [start, end) ahead of / after a DMA
 * transfer from a device.
 *
 * Buffer ends that are not cache-line aligned are cleaned & invalidated
 * (dc civac) instead of just invalidated (dc ivac):
 *  - an unaligned start may share a dirty line with unrelated data that
 *    a plain invalidate would destroy;
 *  - an unaligned end's partial line must still be removed from the
 *    cache, but without discarding the adjacent data in the same line.
 * (The previous code rounded both ends down and used dc ivac only,
 * losing data on an unaligned start and leaving the final partial line
 * stale on an unaligned end.)
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 *
 * Corrupted registers: x0-x3
 */
__dma_inv_range:
	dcache_line_size x2, x3			// x2 = cache line size
	sub	x3, x2, #1			// x3 = line mask
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3			// round end down to a line boundary
	b.eq	1f
	dc	civac, x1			// clean & invalidate the partial end line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3			// round start down to a line boundary
	b.eq	2f
	dc	civac, x0			// clean & invalidate the partial start line
	b	3f				// first line handled; skip the ivac for it
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy				// complete maintenance before return
	ret
ENDPROC(__dma_inv_range)
| 187 | |
/*
 * __dma_clean_range(start, end)
 *
 * Write back (clean) dirty D-cache lines covering [start, end) so the
 * data is visible to a DMA master reading from memory.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
__dma_clean_range:
	dcache_line_size x2, x3			// x2 = cache line size
	sub	x3, x2, #1			// x3 = line mask
	bic	x0, x0, x3			// round start down to a line boundary
1:	dc	cvac, x0			// clean D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// cleaning must complete before return
	ret
ENDPROC(__dma_clean_range)
| 204 | |
/*
 * __dma_flush_range(start, end)
 *
 * Clean & invalidate the D-cache over [start, end) in one pass
 * (write back dirty data and drop the lines).
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(__dma_flush_range)
	dcache_line_size x2, x3			// x2 = cache line size
	sub	x3, x2, #1			// x3 = line mask
	bic	x0, x0, x3			// round start down to a line boundary
1:	dc	civac, x0			// clean & invalidate D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy				// complete maintenance before return
	ret
ENDPROC(__dma_flush_range)
| 221 | |
/*
 * __dma_map_area(start, size, dir)
 *
 * Cache maintenance before handing a buffer to a device, dispatching
 * on the transfer direction via a tail call.
 *
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_map_area)
	add	x1, x1, x0			// convert (start, size) to (start, end)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_range			// device will write: invalidate the range
	b	__dma_clean_range		// otherwise: clean so the device reads fresh data
ENDPROC(__dma_map_area)
| 234 | |
/*
 * __dma_unmap_area(start, size, dir)
 *
 * Cache maintenance after a device has finished with a buffer.
 *
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_unmap_area)
	add	x1, x1, x0			// convert (start, size) to (start, end)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_range			// device may have written: drop stale lines
	ret					// DMA_TO_DEVICE: nothing to do on unmap
ENDPROC(__dma_unmap_area)