#include "amd64_edac.h"

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
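/*
 * Note: with perms 0644 above, both knobs are also writable at runtime via
 * /sys/module/<module>/parameters/, in addition to being settable at module
 * load time (e.g. "modprobe <module> ecc_enable_override=1").
 */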

/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary between 64 byte
 * blocks (dram) and cache lines, which is nasty, so we use bandwidth in
 * bytes/sec for the setting instead.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software
 * on other archs, we might not have access to the caches directly.
 */

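/*
 * For reference, scrubrates[] (defined elsewhere in this driver) maps each
 * hardware scrub-rate bit pattern to the bandwidth it consumes. A sketch of
 * its shape (illustrative, not the authoritative definition):
 *
 *	struct scrubrate {
 *		u32 scrubval;	// bit pattern for the scrub control register
 *		u32 bandwidth;	// bytes/sec consumed at that rate
 *	};
 *
 * The table is ordered from the fastest rate down to a terminating
 * "scrubbing off" entry with scrubval == 0.
 */
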
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value.
 * If the requested rate is too high, use the last (maximum) entry found.
 */
static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
				       u32 min_scrubrate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is less than or equal to the setting requested
	 * (i.e. the highest rate not exceeding it) and program that. If at
	 * the last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_scrubrate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;
	if (scrubval)
		edac_printk(KERN_DEBUG, EDAC_MC,
			    "Setting scrub rate bandwidth: %u\n",
			    scrubrates[i].bandwidth);
	else
		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");

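	/*
	 * Write only the DramScrub field, bits 4:0 of the scrub control
	 * register, hence the 0x001F mask; pci_write_bits32() performs a
	 * read-modify-write so the remaining bits are preserved.
	 */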
	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	return 0;
}

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x0;

	switch (boot_cpu_data.x86) {
	case 0xf:
		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;
	case 0x11:
		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_printk(KERN_ERR, "Unsupported family!\n");
		return -EINVAL;
	}
	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
					   min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int status = -1, i, ret = 0;

	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
	if (ret)
		debugf0("Reading K8_SCRCTRL failed\n");

	scrubval = scrubval & 0x001F;

	edac_printk(KERN_DEBUG, EDAC_MC,
		    "pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;
			status = 0;
			break;
		}
	}

	return status;
}

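/*
 * The set/get routines above are meant to back the EDAC core's
 * {set,get}_sdram_scrub_rate callbacks on struct mem_ctl_info; the wiring
 * happens where the mci is initialized elsewhere in this driver.
 */
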
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	return csrow >> (pvt->num_dcsm >> 3);
}

/* Return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address for the i'th CS entry. This function is needed
 * because the number of DCSM registers differs between Rev E and prior
 * silicon vs Rev F and later.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}

/*
 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
 */
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
{
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];
}

/*
 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 * with node_id.
 */
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
{
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
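	/*
	 * Worked example with an illustrative value: a sign-extended SysAddr
	 * of 0xffffff8012345678 becomes 0x0000008012345678 after the mask
	 * below.
	 */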
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	int node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = pvt->dram_IntlvEn[0];

	if (intlv_en == 0) {
		for (node_id = 0; ; ) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				break;

			if (++node_id >= DRAM_REG_COUNT)
				goto err_no_match;
		}
		goto found;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
			     "IntlvEn field of DRAM Base Register for node 0: "
			     "This probably indicates a BIOS bug.\n", intlv_en);
		return NULL;
	}

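	/*
	 * With interleaving enabled, intlv_en (0x01, 0x03 or 0x07) selects
	 * SysAddr bits 12, 13:12 or 14:12 as the interleave selector. For
	 * example, with an 8-node interleave (intlv_en == 0x07), sys_addr
	 * 0x5000 yields bits == 0x5.
	 */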
	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_printk(KERN_WARNING,
			     "%s(): sys_addr 0x%lx falls outside base/limit "
			     "address range for node %d with node interleaving "
			     "enabled.\n", __func__, (unsigned long)sys_addr,
			     node_id);
		return NULL;
	}

found:
	return edac_mc_find(node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;