#ifndef __ASM_SH64_CACHE_H
#define __ASM_SH64_CACHE_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/cache.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 */
#include <asm/cacheflush.h>

#define L1_CACHE_SHIFT          5
/* bytes per L1 cache line */
#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
#define L1_CACHE_ALIGN_MASK     (~(L1_CACHE_BYTES - 1))
#define L1_CACHE_ALIGN(x)       (((x) + (L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
#define L1_CACHE_SIZE_BYTES     (L1_CACHE_BYTES << 10)
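/*
 * Worked example: with L1_CACHE_SHIFT = 5 the macros above evaluate to
 *
 *	L1_CACHE_BYTES         = 1 << 5              = 32
 *	L1_CACHE_ALIGN_MASK    = ~(32 - 1)           = 0xffffffe0 (32-bit values)
 *	L1_CACHE_ALIGN(0x1005) = (0x1005 + 31) & ~31 = 0x1020
 *	L1_CACHE_SIZE_BYTES    = 32 << 10            = 32768 (32KB)
 *
 * i.e. L1_CACHE_ALIGN() rounds an address or size up to the next 32-byte
 * cache line boundary.
 */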

#ifdef MODULE
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
#else
#define __cacheline_aligned					\
	__attribute__((__aligned__(L1_CACHE_BYTES),		\
		       __section__(".data.cacheline_aligned")))
#endif
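/*
 * Illustrative use only (the variable name is hypothetical): marking a
 * frequently written object __cacheline_aligned starts it on its own
 * 32-byte line, avoiding false sharing with its neighbours, e.g.
 *
 *	static struct cache_info boot_oc_info __cacheline_aligned;
 *
 * In the non-module case the object is also placed in the
 * .data.cacheline_aligned section so the linker can pack such objects
 * together.
 */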

/*
 * Control Registers.
 */
#define ICCR_BASE	0x01600000	/* Instruction Cache Control Register */
#define ICCR_REG0	0		/* Register 0 offset */
#define ICCR_REG1	1		/* Register 1 offset */
#define ICCR0		(ICCR_BASE + ICCR_REG0)
#define ICCR1		(ICCR_BASE + ICCR_REG1)

#define ICCR0_OFF	0x0		/* Set ICACHE off */
#define ICCR0_ON	0x1		/* Set ICACHE on */
#define ICCR0_ICI	0x2		/* Invalidate all in IC */

#define ICCR1_NOLOCK	0x0		/* Set No Locking */
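/*
 * Illustrative only: the ICCR0 values are bit flags, so a typical bring-up
 * value is (ICCR0_ON | ICCR0_ICI) = 0x3, i.e. invalidate the whole
 * instruction cache and enable it in a single write to ICCR0.  The register
 * access itself is done by the low-level cache setup code, not by this
 * header.
 */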

#define OCCR_BASE	0x01E00000	/* Operand Cache Control Register */
#define OCCR_REG0	0		/* Register 0 offset */
#define OCCR_REG1	1		/* Register 1 offset */
#define OCCR0		(OCCR_BASE + OCCR_REG0)
#define OCCR1		(OCCR_BASE + OCCR_REG1)

#define OCCR0_OFF	0x0		/* Set OCACHE off */
#define OCCR0_ON	0x1		/* Set OCACHE on */
#define OCCR0_OCI	0x2		/* Invalidate all in OC */
#define OCCR0_WT	0x4		/* Set OCACHE in WT Mode */
#define OCCR0_WB	0x0		/* Set OCACHE in WB Mode */

#define OCCR1_NOLOCK	0x0		/* Set No Locking */
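/*
 * Illustrative only: OCCR0_WB is 0x0, so write-back mode is simply the
 * absence of OCCR0_WT.  A typical bring-up value for a write-back operand
 * cache would be (OCCR0_WB | OCCR0_OCI | OCCR0_ON) = 0x3; substituting
 * OCCR0_WT gives 0x7 for write-through mode.
 */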


/*
 * SH-5
 * A bit of description here, for neff=32.
 *
 *                        |<--- tag  (19 bits) --->|
 * +-----------------------------+-----------------+------+----------+------+
 * |                             |                 | ways |set index |offset|
 * +-----------------------------+-----------------+------+----------+------+
 *   ^                                              2 bits   8 bits   5 bits
 *   +- Bit 31
 *
 * Cacheline size is based on offset: 5 bits = 32 bytes per line
 * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
 * have a broader space for registers.  These are outlined by
 * CACHE_?C_*_STEP below.
 *
 */

/* Valid and Dirty bits */
#define SH_CACHE_VALID		(1LL<<0)
#define SH_CACHE_UPDATED	(1LL<<57)
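/*
 * Illustrative only: these are single-bit flags within a 64-bit cache tag
 * value, so a tag entry read from the tag array can be tested with
 * (tag & SH_CACHE_VALID) for validity and (tag & SH_CACHE_UPDATED) for a
 * dirty line that still needs writing back.
 */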

/* Cache flags */
#define SH_CACHE_MODE_WT	(1LL<<0)
#define SH_CACHE_MODE_WB	(1LL<<1)

#ifndef __ASSEMBLY__

/*
 * Cache information structure.
 *
 * Defined for both I and D cache, per-processor.
 */
struct cache_info {
	unsigned int ways;
	unsigned int sets;
	unsigned int linesz;

	unsigned int way_shift;
	unsigned int entry_shift;
	unsigned int set_shift;
	unsigned int way_step_shift;
	unsigned int asid_shift;

	unsigned int way_ofs;

	unsigned int asid_mask;
	unsigned int idx_mask;
	unsigned int epn_mask;

	unsigned long flags;
};
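/*
 * Illustrative only: the geometry fields relate as
 *
 *	total size = ways * sets * linesz
 *
 * e.g. for the SH-5 cache pictured above (2 way bits, 8 set-index bits,
 * 5 offset bits) that is 4 * 256 * 32 = 32768 bytes (32KB).
 */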

#endif /* __ASSEMBLY__ */

/* Instruction cache */
#define CACHE_IC_ADDRESS_ARRAY	0x01000000

/* Operand Cache */
#define CACHE_OC_ADDRESS_ARRAY	0x01800000

/*
 * These declarations relate to cache 'synonyms' in the operand cache.  A
 * 'synonym' occurs where effective address bits overlap between those used
 * for indexing the cache sets and those passed to the MMU for translation.
 * In the case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages.
 */

#define CACHE_OC_N_SYNBITS	1	/* Number of synonym bits */
#define CACHE_OC_SYN_SHIFT	12
/* Mask to select synonym bit(s) */
#define CACHE_OC_SYN_MASK	(((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
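/*
 * Worked example: with one synonym bit at shift 12, CACHE_OC_SYN_MASK
 * evaluates to ((1UL<<1)-1)<<12 = 0x1000.  Two virtual mappings of the same
 * 4k physical page that differ in (vaddr & CACHE_OC_SYN_MASK) index
 * different operand cache sets, which the cache flushing code has to take
 * into account.
 */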


/*
 * The instruction cache can't be invalidated based on physical addresses,
 * so no corresponding instruction cache defines are required here.
 */

#endif /* __ASM_SH64_CACHE_H */