blob: 9efa88a9fdf52e2a8dfd0a7c5707cdb10f7675f1 [file] [log] [blame]
Doug Thompson2bc65412009-05-04 20:11:14 +02001#include "amd64_edac.h"
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +02002#include <asm/amd_nb.h>
Doug Thompson2bc65412009-05-04 20:11:14 +02003
/* EDAC PCI control instance used to report PCI parity errors */
static struct edac_pci_ctl_info *amd64_ctl_pci;

/* Module parameter: non-zero enables reporting of GART TLB errors */
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

/* Per-CPU MSR buffer used when reading/writing MSRs across all cores */
static struct msr __percpu *msrs;

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct amd64_pvt **pvts;
Doug Thompson2bc65412009-05-04 20:11:14 +020021
/*
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 *
 * Each table maps a DBAM chip-select size code (array index) to a size value.
 * NOTE(review): values are presumably chip-select sizes in MB — confirm
 * against the BKDG for the respective family/revision.
 */
static int ddr2_dbam_revCG[] = {
			   [0]		= 32,
			   [1]		= 64,
			   [2]		= 128,
			   [3]		= 256,
			   [4]		= 512,
			   [5]		= 1024,
			   [6]		= 2048,
};

/* K8 revision D mapping; note some distinct codes alias to the same size */
static int ddr2_dbam_revD[] = {
			   [0]		= 32,
			   [1]		= 64,
			   [2 ... 3]	= 128,
			   [4]		= 256,
			   [5]		= 512,
			   [6]		= 256,
			   [7]		= 512,
			   [8 ... 9]	= 1024,
			   [10]		= 2048,
};

/* DDR2 mapping for later revisions (see mapping comment above) */
static int ddr2_dbam[] = { [0] = 128,
			   [1] = 256,
			   [2 ... 4] = 512,
			   [5 ... 6] = 1024,
			   [7 ... 8] = 2048,
			   [9 ... 10] = 4096,
			   [11] = 8192,
};

/* DDR3 mapping; -1 entries presumably mark reserved/invalid codes — confirm */
static int ddr3_dbam[] = { [0] = -1,
			   [1] = 256,
			   [2] = 512,
			   [3 ... 4] = -1,
			   [5 ... 6] = 1024,
			   [7 ... 8] = 2048,
			   [9 ... 10] = 4096,
			   [11] = 8192,
};
66
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */

/*
 * { register bit pattern, bandwidth in bytes/sec }, sorted by decreasing
 * bandwidth and terminated by the "scrubbing off" entry (scrubval 0x00).
 */
struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},        /* scrubbing off */
};
100
101/*
Doug Thompson2bc65412009-05-04 20:11:14 +0200102 * Memory scrubber control interface. For K8, memory scrubbing is handled by
103 * hardware and can involve L2 cache, dcache as well as the main memory. With
104 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
105 * functionality.
106 *
107 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
108 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
109 * bytes/sec for the setting.
110 *
111 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
112 * other archs, we might not have access to the caches directly.
113 */
114
115/*
116 * scan the scrub rate mapping table for a close or matching bandwidth value to
117 * issue. If requested is too big, then use last maximum value found.
118 */
Borislav Petkov395ae782010-10-01 18:38:19 +0200119static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
Doug Thompson2bc65412009-05-04 20:11:14 +0200120{
121 u32 scrubval;
122 int i;
123
124 /*
125 * map the configured rate (new_bw) to a value specific to the AMD64
126 * memory controller and apply to register. Search for the first
127 * bandwidth entry that is greater or equal than the setting requested
128 * and program that. If at last entry, turn off DRAM scrubbing.
129 */
130 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
131 /*
132 * skip scrub rates which aren't recommended
133 * (see F10 BKDG, F3x58)
134 */
Borislav Petkov395ae782010-10-01 18:38:19 +0200135 if (scrubrates[i].scrubval < min_rate)
Doug Thompson2bc65412009-05-04 20:11:14 +0200136 continue;
137
138 if (scrubrates[i].bandwidth <= new_bw)
139 break;
140
141 /*
142 * if no suitable bandwidth found, turn off DRAM scrubbing
143 * entirely by falling back to the last element in the
144 * scrubrates array.
145 */
146 }
147
148 scrubval = scrubrates[i].scrubval;
149 if (scrubval)
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200150 amd64_info("Setting scrub rate bandwidth: %u\n",
151 scrubrates[i].bandwidth);
Doug Thompson2bc65412009-05-04 20:11:14 +0200152 else
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200153 amd64_info("Turning scrubbing off.\n");
Doug Thompson2bc65412009-05-04 20:11:14 +0200154
155 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
156
157 return 0;
158}
159
Borislav Petkov395ae782010-10-01 18:38:19 +0200160static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
Doug Thompson2bc65412009-05-04 20:11:14 +0200161{
162 struct amd64_pvt *pvt = mci->pvt_info;
Doug Thompson2bc65412009-05-04 20:11:14 +0200163
Borislav Petkov8d5b5d92010-10-01 20:11:07 +0200164 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
Doug Thompson2bc65412009-05-04 20:11:14 +0200165}
166
167static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
168{
169 struct amd64_pvt *pvt = mci->pvt_info;
170 u32 scrubval = 0;
Borislav Petkov6ba5dcd2009-10-13 19:26:55 +0200171 int status = -1, i;
Doug Thompson2bc65412009-05-04 20:11:14 +0200172
Borislav Petkov8d5b5d92010-10-01 20:11:07 +0200173 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
Doug Thompson2bc65412009-05-04 20:11:14 +0200174
175 scrubval = scrubval & 0x001F;
176
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200177 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
Doug Thompson2bc65412009-05-04 20:11:14 +0200178
Roel Kluin926311f2010-01-11 20:58:21 +0100179 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
Doug Thompson2bc65412009-05-04 20:11:14 +0200180 if (scrubrates[i].scrubval == scrubval) {
181 *bw = scrubrates[i].bandwidth;
182 status = 0;
183 break;
184 }
185 }
186
187 return status;
188}
189
Doug Thompson67757632009-04-27 15:53:22 +0200190/* Map from a CSROW entry to the mask entry that operates on it */
191static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
192{
Borislav Petkov1433eb92009-10-21 13:44:36 +0200193 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
Borislav Petkov9d858bb2009-09-21 14:35:51 +0200194 return csrow;
195 else
196 return csrow >> 1;
Doug Thompson67757632009-04-27 15:53:22 +0200197}
198
199/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
200static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
201{
202 if (dct == 0)
203 return pvt->dcsb0[csrow];
204 else
205 return pvt->dcsb1[csrow];
206}
207
208/*
209 * Return the 'mask' address the i'th CS entry. This function is needed because
210 * there number of DCSM registers on Rev E and prior vs Rev F and later is
211 * different.
212 */
213static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
214{
215 if (dct == 0)
216 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
217 else
218 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
219}
220
221
222/*
223 * In *base and *limit, pass back the full 40-bit base and limit physical
224 * addresses for the node given by node_id. This information is obtained from
225 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
226 * base and limit addresses are of type SysAddr, as defined at the start of
227 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
228 * in the address range they represent.
229 */
230static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
231 u64 *base, u64 *limit)
232{
233 *base = pvt->dram_base[node_id];
234 *limit = pvt->dram_limit[node_id];
235}
236
237/*
238 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
239 * with node_id
240 */
241static int amd64_base_limit_match(struct amd64_pvt *pvt,
242 u64 sys_addr, int node_id)
243{
244 u64 base, limit, addr;
245
246 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
247
248 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
249 * all ones if the most significant implemented address bit is 1.
250 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
251 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
252 * Application Programming.
253 */
254 addr = sys_addr & 0x000000ffffffffffull;
255
256 return (addr >= base) && (addr <= limit);
257}
258
259/*
260 * Attempt to map a SysAddr to a node. On success, return a pointer to the
261 * mem_ctl_info structure for the node that the SysAddr maps to.
262 *
263 * On failure, return NULL.
264 */
265static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
266 u64 sys_addr)
267{
268 struct amd64_pvt *pvt;
269 int node_id;
270 u32 intlv_en, bits;
271
272 /*
273 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
274 * 3.4.4.2) registers to map the SysAddr to a node ID.
275 */
276 pvt = mci->pvt_info;
277
278 /*
279 * The value of this field should be the same for all DRAM Base
280 * registers. Therefore we arbitrarily choose to read it from the
281 * register for node 0.
282 */
283 intlv_en = pvt->dram_IntlvEn[0];
284
285 if (intlv_en == 0) {
Borislav Petkov8edc5442009-09-18 12:39:19 +0200286 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
Doug Thompson67757632009-04-27 15:53:22 +0200287 if (amd64_base_limit_match(pvt, sys_addr, node_id))
Borislav Petkov8edc5442009-09-18 12:39:19 +0200288 goto found;
Doug Thompson67757632009-04-27 15:53:22 +0200289 }
Borislav Petkov8edc5442009-09-18 12:39:19 +0200290 goto err_no_match;
Doug Thompson67757632009-04-27 15:53:22 +0200291 }
292
Borislav Petkov72f158f2009-09-18 12:27:27 +0200293 if (unlikely((intlv_en != 0x01) &&
294 (intlv_en != 0x03) &&
295 (intlv_en != 0x07))) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200296 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
Doug Thompson67757632009-04-27 15:53:22 +0200297 return NULL;
298 }
299
300 bits = (((u32) sys_addr) >> 12) & intlv_en;
301
302 for (node_id = 0; ; ) {
Borislav Petkov8edc5442009-09-18 12:39:19 +0200303 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
Doug Thompson67757632009-04-27 15:53:22 +0200304 break; /* intlv_sel field matches */
305
306 if (++node_id >= DRAM_REG_COUNT)
307 goto err_no_match;
308 }
309
310 /* sanity test for sys_addr */
311 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200312 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
313 "range for node %d with node interleaving enabled.\n",
314 __func__, sys_addr, node_id);
Doug Thompson67757632009-04-27 15:53:22 +0200315 return NULL;
316 }
317
318found:
319 return edac_mc_find(node_id);
320
321err_no_match:
322 debugf2("sys_addr 0x%lx doesn't match any node\n",
323 (unsigned long)sys_addr);
324
325 return NULL;
326}
Doug Thompsone2ce7252009-04-27 15:57:12 +0200327
328/*
329 * Extract the DRAM CS base address from selected csrow register.
330 */
331static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
332{
333 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
334 pvt->dcs_shift;
335}
336
337/*
338 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
339 */
340static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
341{
342 u64 dcsm_bits, other_bits;
343 u64 mask;
344
345 /* Extract bits from DRAM CS Mask. */
346 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
347
348 other_bits = pvt->dcsm_mask;
349 other_bits = ~(other_bits << pvt->dcs_shift);
350
351 /*
352 * The extracted bits from DCSM belong in the spaces represented by
353 * the cleared bits in other_bits.
354 */
355 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
356
357 return mask;
358}
359
360/*
361 * @input_addr is an InputAddr associated with the node given by mci. Return the
362 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
363 */
364static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
365{
366 struct amd64_pvt *pvt;
367 int csrow;
368 u64 base, mask;
369
370 pvt = mci->pvt_info;
371
372 /*
373 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
374 * base/mask register pair, test the condition shown near the start of
375 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
376 */
Borislav Petkov9d858bb2009-09-21 14:35:51 +0200377 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
Doug Thompsone2ce7252009-04-27 15:57:12 +0200378
379 /* This DRAM chip select is disabled on this node */
380 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
381 continue;
382
383 base = base_from_dct_base(pvt, csrow);
384 mask = ~mask_from_dct_mask(pvt, csrow);
385
386 if ((input_addr & mask) == (base & mask)) {
387 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
388 (unsigned long)input_addr, csrow,
389 pvt->mc_node_id);
390
391 return csrow;
392 }
393 }
394
395 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
396 (unsigned long)input_addr, pvt->mc_node_id);
397
398 return -1;
399}
400
401/*
402 * Return the base value defined by the DRAM Base register for the node
403 * represented by mci. This function returns the full 40-bit value despite the
404 * fact that the register only stores bits 39-24 of the value. See section
405 * 3.4.4.1 (BKDG #26094, K8, revA-E)
406 */
407static inline u64 get_dram_base(struct mem_ctl_info *mci)
408{
409 struct amd64_pvt *pvt = mci->pvt_info;
410
411 return pvt->dram_base[pvt->mc_node_id];
412}
413
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1(" revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt->dhar);

	*hole_base = base;
	/* The hole always extends from its base up to the 4GB boundary. */
	*hole_size = (0x1ull << 32) - base;

	/* The offset field is encoded differently on K8 vs Fam10h+. */
	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
491
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(mci);

	/* Step 2 above: apply the DHAR offset if the address is hoisted. */
	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}
561
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	BUG_ON(intlv_en > 7);
	return intlv_shift_table[intlv_en];
}
576
577/* Translate the DramAddr given by @dram_addr to an InputAddr. */
578static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
579{
580 struct amd64_pvt *pvt;
581 int intlv_shift;
582 u64 input_addr;
583
584 pvt = mci->pvt_info;
585
586 /*
587 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
588 * concerning translating a DramAddr to an InputAddr.
589 */
590 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
591 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
592 (dram_addr & 0xfff);
593
594 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
595 intlv_shift, (unsigned long)dram_addr,
596 (unsigned long)input_addr);
597
598 return input_addr;
599}
600
601/*
602 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
603 * assumed that @sys_addr maps to the node given by mci.
604 */
605static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
606{
607 u64 input_addr;
608
609 input_addr =
610 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
611
612 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
613 (unsigned long)sys_addr, (unsigned long)input_addr);
614
615 return input_addr;
616}
617
618
619/*
620 * @input_addr is an InputAddr associated with the node represented by mci.
621 * Translate @input_addr to a DramAddr and return the result.
622 */
623static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
624{
625 struct amd64_pvt *pvt;
626 int node_id, intlv_shift;
627 u64 bits, dram_addr;
628 u32 intlv_sel;
629
630 /*
631 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
632 * shows how to translate a DramAddr to an InputAddr. Here we reverse
633 * this procedure. When translating from a DramAddr to an InputAddr, the
634 * bits used for node interleaving are discarded. Here we recover these
635 * bits from the IntlvSel field of the DRAM Limit register (section
636 * 3.4.4.2) for the node that input_addr is associated with.
637 */
638 pvt = mci->pvt_info;
639 node_id = pvt->mc_node_id;
640 BUG_ON((node_id < 0) || (node_id > 7));
641
642 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
643
644 if (intlv_shift == 0) {
645 debugf1(" InputAddr 0x%lx translates to DramAddr of "
646 "same value\n", (unsigned long)input_addr);
647
648 return input_addr;
649 }
650
651 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
652 (input_addr & 0xfff);
653
654 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
655 dram_addr = bits + (intlv_sel << 12);
656
657 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
658 "(%d node interleave bits)\n", (unsigned long)input_addr,
659 (unsigned long)dram_addr, intlv_shift);
660
661 return dram_addr;
662}
663
/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
	int ret = 0;

	/* If the address lies in the hoisted region, add back the DHAR offset. */
	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	/* Otherwise: SysAddr = DramAddr + this node's DRAM base. */
	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values.  However, the value we are
	 * supposed to return is a full 64-bit physical address.  The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
711
712/*
713 * @input_addr is an InputAddr associated with the node given by mci. Translate
714 * @input_addr to a SysAddr.
715 */
716static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
717 u64 input_addr)
718{
719 return dram_addr_to_sys_addr(mci,
720 input_addr_to_dram_addr(mci, input_addr));
721}
722
723/*
724 * Find the minimum and maximum InputAddr values that map to the given @csrow.
725 * Pass back these values in *input_addr_min and *input_addr_max.
726 */
727static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
728 u64 *input_addr_min, u64 *input_addr_max)
729{
730 struct amd64_pvt *pvt;
731 u64 base, mask;
732
733 pvt = mci->pvt_info;
Borislav Petkov9d858bb2009-09-21 14:35:51 +0200734 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
Doug Thompson93c2df52009-05-04 20:46:50 +0200735
736 base = base_from_dct_base(pvt, csrow);
737 mask = mask_from_dct_mask(pvt, csrow);
738
739 *input_addr_min = base & ~mask;
740 *input_addr_max = base | mask | pvt->dcs_mask_notused;
741}
742
Doug Thompson93c2df52009-05-04 20:46:50 +0200743/* Map the Error address to a PAGE and PAGE OFFSET. */
744static inline void error_address_to_page_and_offset(u64 error_address,
745 u32 *page, u32 *offset)
746{
747 *page = (u32) (error_address >> PAGE_SHIFT);
748 *offset = ((u32) error_address) & ~PAGE_MASK;
749}
750
751/*
752 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
753 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
754 * of a node that detected an ECC memory error. mci represents the node that
755 * the error address maps to (possibly different from the node that detected
756 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
757 * error.
758 */
759static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
760{
761 int csrow;
762
763 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
764
765 if (csrow == -1)
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200766 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
767 "address 0x%lx\n", (unsigned long)sys_addr);
Doug Thompson93c2df52009-05-04 20:46:50 +0200768 return csrow;
769}
Doug Thompsone2ce7252009-04-27 15:57:12 +0200770
/* Forward declaration: maps an ECC syndrome to the DCT channel it belongs to. */
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Reassemble the 16-bit ECC syndrome from the MCA NB status registers:
 * low byte from NBSH[22:15], high byte from NBSL[31:24].
 */
static u16 extract_syndrome(struct err_regs *err)
{
	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
}
777
Doug Thompson2da11652009-04-27 16:09:09 +0200778/*
779 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
780 * are ECC capable.
781 */
782static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
783{
784 int bit;
Borislav Petkov584fcff2009-06-10 18:29:54 +0200785 enum dev_type edac_cap = EDAC_FLAG_NONE;
Doug Thompson2da11652009-04-27 16:09:09 +0200786
Borislav Petkov1433eb92009-10-21 13:44:36 +0200787 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
Doug Thompson2da11652009-04-27 16:09:09 +0200788 ? 19
789 : 17;
790
Borislav Petkov584fcff2009-06-10 18:29:54 +0200791 if (pvt->dclr0 & BIT(bit))
Doug Thompson2da11652009-04-27 16:09:09 +0200792 edac_cap = EDAC_FLAG_SECDED;
793
794 return edac_cap;
795}
796
797
/* Forward declaration: dumps the DIMM sizes of DRAM controller @ctrl. */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

/* Decode and pretty-print the DRAM Configuration Low register of channel @chan. */
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ?  "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1(" PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ?  "enabled" : "disabled");

	debugf1(" DCT 128bit mode width: %s\n",
		(dclr & BIT(11)) ?  "128b" : "64b");

	debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ?  "yes" : "no",
		(dclr & BIT(13)) ?  "yes" : "no",
		(dclr & BIT(14)) ?  "yes" : "no",
		(dclr & BIT(15)) ?  "yes" : "no");
}
820
/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
	int ganged;

	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1(" NB two channel DRAM capable: %s\n",
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar,
		dhar_base(pvt->dhar),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
					   : f10_dhar_offset(pvt->dhar));

	debugf1(" DramHoleValid: %s\n",
		(pvt->dhar & DHAR_VALID) ? "yes" : "no");

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf) {
		/* K8 has a single controller's worth of info to dump. */
		amd64_debug_display_dimm_sizes(0, pvt);
		return;
	}

	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);

	/*
	 * Determine if ganged and then dump memory sizes for first controller,
	 * and if NOT ganged dump info for 2nd controller.
	 */
	ganged = dct_ganging_enabled(pvt);

	amd64_debug_display_dimm_sizes(0, pvt);

	if (!ganged)
		amd64_debug_display_dimm_sizes(1, pvt);
}
872
/* Read in both of DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
	/* DCT0 DRAM Bank Address Mapping (F2x080) */
	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);

	/* DCT1's DBAM (F2x180) exists only on family 0x10 and later */
	if (boot_cpu_data.x86 >= 0x10)
		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
}
881
/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 * 3.5.4 (p. 84).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 * 4.5.4 (p. 87).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
911static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
912{
Borislav Petkov9d858bb2009-09-21 14:35:51 +0200913
Borislav Petkov1433eb92009-10-21 13:44:36 +0200914 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
Borislav Petkov9d858bb2009-09-21 14:35:51 +0200915 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
916 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
917 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
918 pvt->dcs_shift = REV_E_DCS_SHIFT;
919 pvt->cs_count = 8;
920 pvt->num_dcsm = 8;
921 } else {
Doug Thompson94be4bf2009-04-27 16:12:00 +0200922 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
923 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
924 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
925 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
Borislav Petkov3ab0e7d2010-10-01 18:19:06 +0200926 pvt->cs_count = 8;
927 pvt->num_dcsm = 4;
Doug Thompson94be4bf2009-04-27 16:12:00 +0200928 }
929}
930
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs, reg;

	/* Pick the revision-dependent masks/shifts before reading. */
	amd64_set_dct_base_and_mask(pvt);

	/* Chip-select base registers, one per chip select. */
	for (cs = 0; cs < pvt->cs_count; cs++) {
		reg = K8_DCSB0 + (cs * 4);
		/* amd64_read_pci_cfg() returns 0 on success */
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's base */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
						&pvt->dcsb1[cs]))
				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);
		} else {
			pvt->dcsb1[cs] = 0;
		}
	}

	/* Chip-select mask registers; fewer masks than bases on NPT parts. */
	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's mask */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
						&pvt->dcsm1[cs]))
				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
		} else {
			pvt->dcsm1[cs] = 0;
		}
	}
}
976
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200977static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
Doug Thompson94be4bf2009-04-27 16:12:00 +0200978{
979 enum mem_type type;
980
Borislav Petkov1433eb92009-10-21 13:44:36 +0200981 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
Borislav Petkov6b4c0bd2009-11-12 15:37:57 +0100982 if (pvt->dchr0 & DDR3_MODE)
983 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
984 else
985 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
Doug Thompson94be4bf2009-04-27 16:12:00 +0200986 } else {
Doug Thompson94be4bf2009-04-27 16:12:00 +0200987 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
988 }
989
Borislav Petkov24f9a7f2010-10-07 18:29:15 +0200990 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
Doug Thompson94be4bf2009-04-27 16:12:00 +0200991
992 return type;
993}
994
Doug Thompsonddff8762009-04-27 16:14:52 +0200995/*
996 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
997 * and the later RevF memory controllers (DDR vs DDR2)
998 *
999 * Return:
1000 * number of memory channels in operation
1001 * Pass back:
1002 * contents of the DCL0_LOW register
1003 */
1004static int k8_early_channel_count(struct amd64_pvt *pvt)
1005{
1006 int flag, err = 0;
1007
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02001008 err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
Doug Thompsonddff8762009-04-27 16:14:52 +02001009 if (err)
1010 return err;
1011
Borislav Petkov9f56da02010-10-01 19:44:53 +02001012 if (pvt->ext_model >= K8_REV_F)
Doug Thompsonddff8762009-04-27 16:14:52 +02001013 /* RevF (NPT) and later */
1014 flag = pvt->dclr0 & F10_WIDTH_128;
Borislav Petkov9f56da02010-10-01 19:44:53 +02001015 else
Doug Thompsonddff8762009-04-27 16:14:52 +02001016 /* RevE and earlier */
1017 flag = pvt->dclr0 & REVE_WIDTH_128;
Doug Thompsonddff8762009-04-27 16:14:52 +02001018
1019 /* not used */
1020 pvt->dclr1 = 0;
1021
1022 return (flag) ? 2 : 1;
1023}
1024
1025/* extract the ERROR ADDRESS for the K8 CPUs */
1026static u64 k8_get_error_address(struct mem_ctl_info *mci,
Borislav Petkovef44cc42009-07-23 14:45:48 +02001027 struct err_regs *info)
Doug Thompsonddff8762009-04-27 16:14:52 +02001028{
1029 return (((u64) (info->nbeah & 0xff)) << 32) +
1030 (info->nbeal & ~0x03);
1031}
1032
1033/*
1034 * Read the Base and Limit registers for K8 based Memory controllers; extract
1035 * fields from the 'raw' reg into separate data fields
1036 *
1037 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1038 */
1039static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1040{
1041 u32 low;
1042 u32 off = dram << 3; /* 8 bytes between DRAM entries */
Doug Thompsonddff8762009-04-27 16:14:52 +02001043
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02001044 amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
Doug Thompsonddff8762009-04-27 16:14:52 +02001045
1046 /* Extract parts into separate data entries */
Borislav Petkov49978112009-10-12 17:23:03 +02001047 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
Doug Thompsonddff8762009-04-27 16:14:52 +02001048 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1049 pvt->dram_rw_en[dram] = (low & 0x3);
1050
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02001051 amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
Doug Thompsonddff8762009-04-27 16:14:52 +02001052
1053 /*
1054 * Extract parts into separate data entries. Limit is the HIGHEST memory
1055 * location of the region, so lower 24 bits need to be all ones
1056 */
Borislav Petkov49978112009-10-12 17:23:03 +02001057 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
Doug Thompsonddff8762009-04-27 16:14:52 +02001058 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1059 pvt->dram_DstNode[dram] = (low & 0x7);
1060}
1061
/*
 * Decode a K8 correctable error: determine channel (via ECC syndrome or
 * address bit 3), find the owning node/mci, map @sys_addr to a csrow and
 * report the CE to the EDAC core.
 */
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				    struct err_regs *err_info, u64 sys_addr)
{
	struct mem_ctl_info *src_mci;
	int channel, csrow;
	u32 page, offset;
	u16 syndrome;

	syndrome = extract_syndrome(err_info);

	/* CHIPKILL enabled */
	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
					   "error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		/* No csrow matched: report without location info. */
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);

		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
				  channel, EDAC_MOD_STR);
	}
}
1121
Borislav Petkov1433eb92009-10-21 13:44:36 +02001122static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
Doug Thompsonddff8762009-04-27 16:14:52 +02001123{
Borislav Petkov1433eb92009-10-21 13:44:36 +02001124 int *dbam_map;
Doug Thompsonddff8762009-04-27 16:14:52 +02001125
Borislav Petkov1433eb92009-10-21 13:44:36 +02001126 if (pvt->ext_model >= K8_REV_F)
1127 dbam_map = ddr2_dbam;
1128 else if (pvt->ext_model >= K8_REV_D)
1129 dbam_map = ddr2_dbam_revD;
1130 else
1131 dbam_map = ddr2_dbam_revCG;
Doug Thompsonddff8762009-04-27 16:14:52 +02001132
Borislav Petkov1433eb92009-10-21 13:44:36 +02001133 return dbam_map[cs_mode];
Doug Thompsonddff8762009-04-27 16:14:52 +02001134}
1135
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f10_early_channel_count(struct amd64_pvt *pvt)
{
	int dbams[] = { DBAM0, DBAM1 };
	int i, j, channels = 0;
	u32 dbam;

	/* If we are in 128 bit mode, then we are using 2 channels */
	if (pvt->dclr0 & F10_WIDTH_128) {
		channels = 2;
		return channels;
	}

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
			goto err_reg;

		/* Any of the four DIMM nibbles non-zero => DCT populated. */
		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	/* Clamp: a node never has more than two DCT channels. */
	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;

err_reg:
	return -1;

}
1194
Borislav Petkov1433eb92009-10-21 13:44:36 +02001195static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
Doug Thompson1afd3c92009-04-27 16:16:50 +02001196{
Borislav Petkov1433eb92009-10-21 13:44:36 +02001197 int *dbam_map;
1198
1199 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1200 dbam_map = ddr3_dbam;
1201 else
1202 dbam_map = ddr2_dbam;
1203
1204 return dbam_map[cs_mode];
Doug Thompson1afd3c92009-04-27 16:16:50 +02001205}
1206
Doug Thompson1afd3c92009-04-27 16:16:50 +02001207static u64 f10_get_error_address(struct mem_ctl_info *mci,
Borislav Petkovef44cc42009-07-23 14:45:48 +02001208 struct err_regs *info)
Doug Thompson1afd3c92009-04-27 16:16:50 +02001209{
1210 return (((u64) (info->nbeah & 0xffff)) << 32) +
1211 (info->nbeal & ~0x01);
1212}
1213
/*
 * Read the Base and Limit registers for F10 based Memory controllers. Extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
 */
static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;

	/* DRAM base/limit registers are paired, 8 bytes apart per range. */
	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);

	/* read the 'raw' DRAM BASE Address register */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);

	/* Extract parts into separate data entries */
	pvt->dram_rw_en[dram] = (low_base & 0x3);

	/* Range disabled (neither readable nor writable): nothing to do. */
	if (pvt->dram_rw_en[dram] == 0)
		return;

	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;

	/* F10h extends the base address with 8 high bits ([47:40]). */
	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
			       (((u64)low_base & 0xFFFF0000) << 8);

	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

	/* read the 'raw' LIMIT registers */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);

	pvt->dram_DstNode[dram] = (low_limit & 0x7);
	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;

	/*
	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
	 * memory location of the region, so low 24 bits need to be all ones.
	 */
	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
				(((u64) low_limit & 0xFFFF0000) << 8) |
				0x00FFFFFF;
}
Doug Thompson6163b5d2009-04-27 16:20:17 +02001260
/*
 * Cache the DCT select low/high registers (F2x110/F2x114) and dump the
 * decoded DCT configuration for debugging.
 */
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
{

	/* amd64_read_pci_cfg() returns 0 on success */
	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
				&pvt->dram_ctl_select_low)) {
		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
			"High range addresses at: 0x%x\n",
			pvt->dram_ctl_select_low,
			dct_sel_baseaddr(pvt));

		debugf0("  DCT mode: %s, All DCTs on: %s\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
			(dct_dram_enabled(pvt) ? "yes" : "no"));

		/* Range split is only meaningful when DCTs are unganged. */
		if (!dct_ganging_enabled(pvt))
			debugf0("  Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0("  DCT data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0("  DCT channel interleave: %s, "
			"DCT interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
			   &pvt->dram_ctl_select_high);
}
1293
/*
 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				 int hi_range_sel, u32 intlv_en)
{
	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		/* Ganged: both DCTs act as one 128-bit channel. */
		cs = 0;
	else if (hi_range_sel)
		/* High range goes to the DCT named by DctSelHi. */
		cs = dct_sel_high;
	else if (dct_interleave_enabled(pvt)) {
		/*
		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
		 */
		if (dct_sel_interleave_addr(pvt) == 0)
			cs = sys_addr >> 6 & 1;
		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
			/* Hash modes: fold addr bits [20:16] into the select. */
			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			if (dct_sel_interleave_addr(pvt) & 1)
				cs = (sys_addr >> 9 & 1) ^ temp;
			else
				cs = (sys_addr >> 6 & 1) ^ temp;
		} else if (intlv_en & 4)
			/* Node interleaving shifts the select bit upward. */
			cs = sys_addr >> 15 & 1;
		else if (intlv_en & 2)
			cs = sys_addr >> 14 & 1;
		else if (intlv_en & 1)
			cs = sys_addr >> 13 & 1;
		else
			cs = sys_addr >> 12 & 1;
	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
		/* Low range when a high range exists: the other DCT. */
		cs = ~dct_sel_high & 1;
	else
		cs = 0;

	return cs;
}
1335
1336static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1337{
1338 if (intlv_en == 1)
1339 return 1;
1340 else if (intlv_en == 3)
1341 return 2;
1342 else if (intlv_en == 7)
1343 return 3;
1344
1345 return 0;
1346}
1347
/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
					   u32 dct_sel_base_addr,
					   u64 dct_sel_base_off,
					   u32 hole_valid, u32 hole_off,
					   u64 dram_base)
{
	u64 chan_off;

	if (hi_range_sel) {
		/*
		 * DHAR hole applies only when DctSelBaseAddr is below 4GB
		 * and the address itself is above 4GB.
		 */
		if (!(dct_sel_base_addr & 0xFFFF0000) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dct_sel_base_off;
	} else {
		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			/* Offset by the range's DRAM base (128MB aligned). */
			chan_off = dram_base & 0xFFFFF8000000ULL;
	}

	/* Channel address = sys_addr minus the channel offset, both masked. */
	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
	       (chan_off & 0x0000FFFFFF800000ULL);
}
1373
1374/* Hack for the time being - Can we get this from BIOS?? */
1375#define CH0SPARE_RANK 0
1376#define CH1SPARE_RANK 1
1377
1378/*
1379 * checks if the csrow passed in is marked as SPARED, if so returns the new
1380 * spare row
1381 */
1382static inline int f10_process_possible_spare(int csrow,
1383 u32 cs, struct amd64_pvt *pvt)
1384{
1385 u32 swap_done;
1386 u32 bad_dram_cs;
1387
1388 /* Depending on channel, isolate respective SPARING info */
1389 if (cs) {
1390 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1391 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1392 if (swap_done && (csrow == bad_dram_cs))
1393 csrow = CH1SPARE_RANK;
1394 } else {
1395 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1396 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1397 if (swap_done && (csrow == bad_dram_cs))
1398 csrow = CH0SPARE_RANK;
1399 }
1400 return csrow;
1401}
1402
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u32 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	/* Look up the driver instance for this node. */
	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);

	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		cs_base = amd64_get_dct_base(pvt, cs, csrow);
		if (!(cs_base & K8_DCSB_CS_ENABLE))
			continue;

		/*
		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
		 * of the actual address.
		 */
		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

		/*
		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
		 */
		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
			csrow, cs_base, cs_mask);

		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

		debugf1("              Final CSMask=0x%x\n", cs_mask);
		debugf1("    (InputAddr & ~CSMask)=0x%x "
			"(CSBase & ~CSMask)=0x%x\n",
			(in_addr & ~cs_mask), (cs_base & ~cs_mask));

		/* Base/address agree on all non-masked bits => this csrow. */
		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
			cs_found = f10_process_possible_spare(csrow, cs, pvt);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
1465
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
				  u64 sys_addr, int *nid, int *chan_sel)
{
	int node_id, cs_found = -EINVAL, high_range = 0;
	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
	u32 hole_valid, tmp, dct_sel_base, channel;
	u64 dram_base, chan_addr, dct_sel_base_off;

	dram_base = pvt->dram_base[dram_range];
	intlv_en = pvt->dram_IntlvEn[dram_range];

	node_id = pvt->dram_DstNode[dram_range];
	intlv_sel = pvt->dram_IntlvSel[dram_range];

	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);

	/*
	 * This assumes that one node's DHAR is the same as all the other
	 * nodes' DHAR.
	 */
	hole_off = (pvt->dhar & 0x0000FF80);
	hole_valid = (pvt->dhar & 0x1);
	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;

	debugf1("   HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
		hole_off, hole_valid, intlv_sel);

	/* With node interleaving on, the address must match this range's
	 * interleave selector, else it belongs to another node. */
	if (intlv_en &&
	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = 1;

	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
					     dct_sel_base_off, hole_valid,
					     hole_off, dram_base);

	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

	/* remove Node ID (in case of memory interleaving) */
	tmp = chan_addr & 0xFC0;

	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;

	/* remove channel interleave and hash */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1)
			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
		else {
			tmp = chan_addr & 0xFC0;
			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
					| tmp;
		}
	}

	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
		chan_addr, (u32)(chan_addr >> 8));

	/* Finally match the normalized input address to a chip select. */
	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

	if (cs_found >= 0) {
		*nid = node_id;
		*chan_sel = channel;
	}
	return cs_found;
}
1547
1548static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1549 int *node, int *chan_sel)
1550{
1551 int dram_range, cs_found = -EINVAL;
1552 u64 dram_base, dram_limit;
1553
1554 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1555
1556 if (!pvt->dram_rw_en[dram_range])
1557 continue;
1558
1559 dram_base = pvt->dram_base[dram_range];
1560 dram_limit = pvt->dram_limit[dram_range];
1561
1562 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1563
1564 cs_found = f10_match_to_this_node(pvt, dram_range,
1565 sys_addr, node,
1566 chan_sel);
1567 if (cs_found >= 0)
1568 break;
1569 }
1570 }
1571 return cs_found;
1572}
1573
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				     struct err_regs *err_info,
				     u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 page, offset;
	int nid, csrow, chan = 0;
	u16 syndrome;

	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

	/* Translation failed: report the CE without location info. */
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	syndrome = extract_syndrome(err_info);

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	if (chan >= 0)
		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
				  EDAC_MOD_STR);
	else
		/*
		 * Channel unknown, report all channels on this CSROW as failed.
		 */
		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
			edac_mc_handle_ce(mci, page, offset, syndrome,
					  csrow, chan, EDAC_MOD_STR);
}
1620
/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs as well
 */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
	int dimm, size0, size1, factor = 0;
	u32 dbam;
	u32 *dcsb;

	if (boot_cpu_data.x86 == 0xf) {
		/* 128-bit interface doubles the per-cs size (shift below). */
		if (pvt->dclr0 & F10_WIDTH_128)
			factor = 1;

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);

	/* Select the register set of the requested DCT. */
	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2, size0 << factor,
			   dimm * 2 + 1, size1 << factor);
	}
}
1666
/*
 * Per-family descriptors: the PCI device IDs of the sibling northbridge
 * functions (F1 = address map, F3 = misc) and the low-level ops used to
 * read/decode that family's DRAM controller registers.
 */
static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.get_error_address	= k8_get_error_address,
			.read_dram_base_limit	= k8_read_dram_base_limit,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f10_early_channel_count,
			.get_error_address	= f10_get_error_address,
			.read_dram_base_limit	= f10_read_dram_base_limit,
			/* F10h additionally has a DCT control register */
			.read_dram_ctl_register	= f10_read_dram_ctl_register,
			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
};
1694
1695static struct pci_dev *pci_get_related_function(unsigned int vendor,
1696 unsigned int device,
1697 struct pci_dev *related)
1698{
1699 struct pci_dev *dev = NULL;
1700
1701 dev = pci_get_device(vendor, device, dev);
1702 while (dev) {
1703 if ((dev->bus->number == related->bus->number) &&
1704 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1705 break;
1706 dev = pci_get_device(vendor, device, dev);
1707 }
1708
1709 return dev;
1710}
1711
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
/* Eigenvectors for x4 ECC symbols: 4 u16 entries per error symbol. */
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
1757
/* Eigenvectors for x8 ECC symbols: 8 u16 entries per error symbol. */
static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
1779
/*
 * Search the eigenvector table for the ECC symbol in error.
 *
 * For each candidate symbol, try to reduce @syndrome to zero by XOR-ing out
 * that symbol's eigenvector components; the first symbol for which this
 * succeeds is the one in error.
 *
 * @syndrome:	16-bit syndrome reported by the hardware
 * @vectors:	flat eigenvector table, @v_dim entries per symbol
 * @num_vecs:	total number of u16 entries in @vectors
 * @v_dim:	eigenvector count per symbol (4 for x4, 8 for x8 symbols)
 *
 * Returns the error symbol index, or -1 if the syndrome cannot be decoded.
 */
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
			   int v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		int v_idx = err_sym * v_dim;
		int v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					/* syndrome fully explained: found it */
					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	debugf0("syndrome(%x) not found\n", syndrome);
	return -1;
}
Doug Thompsond27bf6f2009-05-06 17:55:27 +02001815
Borislav Petkovbfc04ae2009-11-12 19:05:07 +01001816static int map_err_sym_to_channel(int err_sym, int sym_size)
1817{
1818 if (sym_size == 4)
1819 switch (err_sym) {
1820 case 0x20:
1821 case 0x21:
1822 return 0;
1823 break;
1824 case 0x22:
1825 case 0x23:
1826 return 1;
1827 break;
1828 default:
1829 return err_sym >> 4;
1830 break;
1831 }
1832 /* x8 symbols */
1833 else
1834 switch (err_sym) {
1835 /* imaginary bits not in a DIMM */
1836 case 0x10:
1837 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1838 err_sym);
1839 return -1;
1840 break;
1841
1842 case 0x11:
1843 return 0;
1844 break;
1845 case 0x12:
1846 return 1;
1847 break;
1848 default:
1849 return err_sym >> 3;
1850 break;
1851 }
1852 return -1;
1853}
1854
/*
 * Decode @syndrome into an error symbol using the eigenvector table that
 * matches this node's ECC symbol size and map it to a DCT channel.
 * Returns the channel number or -1 on failure.
 */
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	/* pvt->syn_type was determined from F3x180[EccSymbolSize] at init */
	if (pvt->syn_type == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->syn_type);
	else if (pvt->syn_type == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->syn_type);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->syn_type);
}
1875
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;

	/* Ensure that the Error Address is VALID */
	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		/* no address to decode: log a no-info CE and bail */
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* family-specific extraction of the error address from NBEA[LH] */
	sys_addr = pvt->ops->get_error_address(mci, info);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	/* translate to csrow/channel and report through edac_mc_handle_ce() */
	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}
1899
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	/* default to logging against the detecting node's mci */
	log_mci = mci;

	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	/* log against the node that owns the address, not the detector */
	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}
1944
/*
 * Classify a NB bus error from the MCA status fields and dispatch to the
 * CE or UE handler.  Non-ECC and merely-observed errors are ignored.
 */
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	/* NBSH[14:13]: 1 = uncorrected, 2 = corrected error */
	int ecc_type = (info->nbsh >> 13) & 0x3;

	/* Bail early out if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);
}
1965
/*
 * Entry point called from the MCE decoding chain: repackage the raw MCA
 * status/address of node @node_id into err_regs form and decode it.
 */
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
	struct mem_ctl_info *mci = mcis[node_id];
	struct err_regs regs;

	/* split the 64-bit MCA STATUS/ADDR into the legacy 32-bit halves */
	regs.nbsl  = (u32) m->status;
	regs.nbsh  = (u32)(m->status >> 32);
	regs.nbeal = (u32) m->addr;
	regs.nbeah = (u32)(m->addr >> 32);
	regs.nbcfg = nbcfg;

	__amd64_decode_bus_error(mci, &regs);

	/*
	 * Check the UE bit of the NB status high register, if set generate some
	 * logs. If NOT a GART error, then process the event as a NO-INFO event.
	 * If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");

}
Doug Thompsond27bf6f2009-05-06 17:55:27 +02001990
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
					    u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		/* drop the F1 reference taken above before failing */
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}
	debugf1("F1: %s\n", pci_name(pvt->F1));
	debugf1("F2: %s\n", pci_name(pvt->F2));
	debugf1("F3: %s\n", pci_name(pvt->F3));

	return 0;
}
2025
/* Release the F1/F3 PCI device references taken in
 * amd64_reserve_mc_sibling_devices().  pci_dev_put(NULL) is a no-op. */
static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}
2031
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	u32 tmp;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0(" TOP_MEM2 disabled.\n");

	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

	/* only families with per-DCT control (F10h+) provide this op */
	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W Enabled.
		 * Normal processing, compiler should optimize this whole 'if'
		 * debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1(" DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT: 0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1(" IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);

	if (boot_cpu_data.x86 >= 0x10) {
		/* second DCT register set is meaningful only when unganged */
		if (!dct_ganging_enabled(pvt)) {
			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
		}
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
	}

	/* NOTE: tmp is only initialized on the x86 >= 0x10 path above; the
	 * short-circuit on x86 == 0x10 below keeps its use safe. */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model > 7 &&
	    /* F3x180[EccSymbolSize]=1 => x8 symbols */
	    tmp & BIT(25))
		pvt->syn_type = 8;
	else
		pvt->syn_type = 4;

	amd64_dump_misc_regs(pvt);
}
2120
2121/*
2122 * NOTE: CPU Revision Dependent code
2123 *
2124 * Input:
Borislav Petkov9d858bb2009-09-21 14:35:51 +02002125 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
Doug Thompson0ec449e2009-04-27 19:41:25 +02002126 * k8 private pointer to -->
2127 * DRAM Bank Address mapping register
2128 * node_id
2129 * DCL register where dual_channel_active is
2130 *
2131 * The DBAM register consists of 4 sets of 4 bits each definitions:
2132 *
2133 * Bits: CSROWs
2134 * 0-3 CSROWs 0 and 1
2135 * 4-7 CSROWs 2 and 3
2136 * 8-11 CSROWs 4 and 5
2137 * 12-15 CSROWs 6 and 7
2138 *
2139 * Values range from: 0 to 15
2140 * The meaning of the values depends on CPU revision and dual-channel state,
2141 * see relevant BKDG more info.
2142 *
2143 * The memory controller provides for total of only 8 CSROWs in its current
2144 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2145 * single channel or two (2) DIMMs in dual channel mode.
2146 *
2147 * The following code logic collapses the various tables for CSROW based on CPU
2148 * revision.
2149 *
2150 * Returns:
2151 * The number of PAGE_SIZE pages on the specified CSROW number it
2152 * encompasses
2153 *
2154 */
2155static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2156{
Borislav Petkov1433eb92009-10-21 13:44:36 +02002157 u32 cs_mode, nr_pages;
Doug Thompson0ec449e2009-04-27 19:41:25 +02002158
2159 /*
2160 * The math on this doesn't look right on the surface because x/2*4 can
2161 * be simplified to x*2 but this expression makes use of the fact that
2162 * it is integral math where 1/2=0. This intermediate value becomes the
2163 * number of bits to shift the DBAM register to extract the proper CSROW
2164 * field.
2165 */
Borislav Petkov1433eb92009-10-21 13:44:36 +02002166 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
Doug Thompson0ec449e2009-04-27 19:41:25 +02002167
Borislav Petkov1433eb92009-10-21 13:44:36 +02002168 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
Doug Thompson0ec449e2009-04-27 19:41:25 +02002169
2170 /*
2171 * If dual channel then double the memory size of single channel.
2172 * Channel count is 1 or 2
2173 */
2174 nr_pages <<= (pvt->channel_count - 1);
2175
Borislav Petkov1433eb92009-10-21 13:44:36 +02002176 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
Doug Thompson0ec449e2009-04-27 19:41:25 +02002177 debugf0(" nr_pages= %u channel-count = %d\n",
2178 nr_pages, pvt->channel_count);
2179
2180 return nr_pages;
2181}
2182
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 *
 * Returns 1 if every csrow on this node turned out to be empty, 0 otherwise.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	/* refresh NBCFG so the ECC/chipkill bits below are current */
	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
		);

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		/* skip chip selects whose enable bit is clear */
		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		/* translate the csrow's input-address window into the
		 * system-address page range EDAC core expects */
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */

		csrow->mtype = amd64_determine_memory_type(pvt, i);

		debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1(" nr_pages: %u first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
			    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
Doug Thompsond27bf6f2009-05-06 17:55:27 +02002251
Borislav Petkov06724532009-09-16 13:05:46 +02002252/* get all cores on this DCT */
Rusty Russellba578cb2009-11-03 14:56:35 +10302253static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
Doug Thompsonf9431992009-04-27 19:46:08 +02002254{
Borislav Petkov06724532009-09-16 13:05:46 +02002255 int cpu;
Doug Thompsonf9431992009-04-27 19:46:08 +02002256
Borislav Petkov06724532009-09-16 13:05:46 +02002257 for_each_online_cpu(cpu)
2258 if (amd_get_nb_id(cpu) == nid)
2259 cpumask_set_cpu(cpu, mask);
Doug Thompsonf9431992009-04-27 19:46:08 +02002260}
2261
/* check MCG_CTL on all the cpus on this node */
/*
 * Returns true only if the NB machine-check bank (MCGCTL[NBE]) is enabled
 * on every online core of node @nid; false on any disabled core or on
 * cpumask allocation failure.
 */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	/* gather MSR_IA32_MCG_CTL from all cores of the node in one shot;
	 * results land in the per-cpu 'msrs' array */
	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q,
			(nbe ? "enabled" : "disabled"));

		/* one disabled core is enough to fail the whole node */
		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
2295
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002296static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2297{
2298 cpumask_var_t cmask;
Borislav Petkov50542252009-12-11 18:14:40 +01002299 int cpu;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002300
2301 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002302 amd64_warn("%s: error allocating mask\n", __func__);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002303 return false;
2304 }
2305
2306 get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
2307
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002308 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2309
2310 for_each_cpu(cpu, cmask) {
2311
Borislav Petkov50542252009-12-11 18:14:40 +01002312 struct msr *reg = per_cpu_ptr(msrs, cpu);
2313
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002314 if (on) {
Borislav Petkov50542252009-12-11 18:14:40 +01002315 if (reg->l & K8_MSR_MCGCTL_NBE)
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002316 pvt->flags.nb_mce_enable = 1;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002317
Borislav Petkov50542252009-12-11 18:14:40 +01002318 reg->l |= K8_MSR_MCGCTL_NBE;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002319 } else {
2320 /*
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002321 * Turn off NB MCE reporting only when it was off before
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002322 */
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002323 if (!pvt->flags.nb_mce_enable)
Borislav Petkov50542252009-12-11 18:14:40 +01002324 reg->l &= ~K8_MSR_MCGCTL_NBE;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002325 }
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002326 }
2327 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2328
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002329 free_cpumask_var(cmask);
2330
2331 return 0;
2332}
2333
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002334static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2335{
2336 struct amd64_pvt *pvt = mci->pvt_info;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002337 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2338
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002339 amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002340
2341 /* turn on UECCn and CECCEn bits */
2342 pvt->old_nbctl = value & mask;
2343 pvt->nbctl_mcgctl_saved = 1;
2344
2345 value |= mask;
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002346 pci_write_config_dword(pvt->F3, K8_NBCTL, value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002347
2348 if (amd64_toggle_ecc_err_reporting(pvt, ON))
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002349 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002350
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002351 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002352
2353 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2354 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2355 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2356
2357 if (!(value & K8_NBCFG_ECC_ENABLE)) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002358 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002359
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002360 pvt->flags.nb_ecc_prev = 0;
2361
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002362 /* Attempt to turn on DRAM ECC Enable */
2363 value |= K8_NBCFG_ECC_ENABLE;
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002364 pci_write_config_dword(pvt->F3, K8_NBCFG, value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002365
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002366 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002367
2368 if (!(value & K8_NBCFG_ECC_ENABLE)) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002369 amd64_warn("Hardware rejected DRAM ECC enable,"
2370 "check memory DIMM configuration.\n");
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002371 } else {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002372 amd64_info("Hardware accepted DRAM ECC Enable\n");
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002373 }
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002374 } else {
2375 pvt->flags.nb_ecc_prev = 1;
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002376 }
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002377
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002378 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2379 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2380 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2381
2382 pvt->ctl_error_info.nbcfg = value;
2383}
2384
2385static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2386{
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002387 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2388
2389 if (!pvt->nbctl_mcgctl_saved)
2390 return;
2391
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002392 amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002393 value &= ~mask;
2394 value |= pvt->old_nbctl;
2395
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002396 pci_write_config_dword(pvt->F3, K8_NBCTL, value);
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002397
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002398 /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
2399 if (!pvt->flags.nb_ecc_prev) {
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002400 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002401 value &= ~K8_NBCFG_ECC_ENABLE;
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002402 pci_write_config_dword(pvt->F3, K8_NBCFG, value);
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002403 }
2404
2405 /* restore the NB Enable MCGCTL bit */
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002406 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002407 amd64_warn("Error restoring NB MCGCTL settings!\n");
Borislav Petkovf6d6ae962009-11-03 15:29:26 +01002408}
2409
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. This is because the BIOS can properly initialize
 * the memory system completely. A command line option allows to force-enable
 * hardware ECC later in amd64_enable_ecc_error_reporting().
 */
/* Message printed by amd64_check_ecc_enabled() when the module refuses to load. */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
Borislav Petkovbe3468e2009-08-05 15:47:22 +02002421
Doug Thompsonf9431992009-04-27 19:46:08 +02002422static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2423{
2424 u32 value;
Borislav Petkov06724532009-09-16 13:05:46 +02002425 u8 ecc_enabled = 0;
2426 bool nb_mce_en = false;
Doug Thompsonf9431992009-04-27 19:46:08 +02002427
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002428 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
Doug Thompsonf9431992009-04-27 19:46:08 +02002429
2430 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002431 amd64_info("DRAM ECC %s.\n", (ecc_enabled ? "enabled" : "disabled"));
Doug Thompsonf9431992009-04-27 19:46:08 +02002432
Borislav Petkov06724532009-09-16 13:05:46 +02002433 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
2434 if (!nb_mce_en)
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002435 amd64_notice("NB MCE bank disabled, "
2436 "set MSR 0x%08x[4] on node %d to enable.\n",
Borislav Petkovbe3468e2009-08-05 15:47:22 +02002437 MSR_IA32_MCG_CTL, pvt->mc_node_id);
Doug Thompsonf9431992009-04-27 19:46:08 +02002438
Borislav Petkov06724532009-09-16 13:05:46 +02002439 if (!ecc_enabled || !nb_mce_en) {
Doug Thompsonf9431992009-04-27 19:46:08 +02002440 if (!ecc_enable_override) {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002441 amd64_notice("%s", ecc_msg);
Borislav Petkovbe3468e2009-08-05 15:47:22 +02002442 return -ENODEV;
Borislav Petkovd95cf4d2010-02-24 14:49:47 +01002443 } else {
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002444 amd64_warn("Forcing ECC on!\n");
Borislav Petkovbe3468e2009-08-05 15:47:22 +02002445 }
Borislav Petkov43f5e682009-12-21 18:55:18 +01002446 }
Doug Thompsonf9431992009-04-27 19:46:08 +02002447
Borislav Petkovbe3468e2009-08-05 15:47:22 +02002448 return 0;
Doug Thompsonf9431992009-04-27 19:46:08 +02002449}
2450
/*
 * Flat attribute table handed to the EDAC core: all debug attributes
 * followed by all error-injection attributes, plus one slot for the
 * NULL-named terminator entry below.  Filled in by
 * amd64_set_mc_sysfs_attributes().
 */
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

/* End-of-table sentinel: the EDAC core stops at a NULL .attr.name. */
struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2456
2457static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
2458{
2459 unsigned int i = 0, j = 0;
2460
2461 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2462 sysfs_attrs[i] = amd64_dbg_attrs[i];
2463
2464 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2465 sysfs_attrs[i] = amd64_inj_attrs[j];
2466
2467 sysfs_attrs[i] = terminator;
2468
2469 mci->mc_driver_sysfs_attributes = sysfs_attrs;
2470}
2471
/* Fill in the mem_ctl_info capability fields and driver hooks for this MC. */
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* Memory types this driver handles: DDR2 and registered DDR2. */
	mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;

	/* Advertise only the ECC modes the NB capability register reports. */
	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap = amd64_determine_edac_cap(pvt);
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = EDAC_AMD64_VERSION;
	mci->ctl_name = pvt->ctl_name;
	mci->dev_name = pci_name(pvt->F2);
	mci->ctl_page_to_phys = NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
2496
Borislav Petkov0092b202010-10-01 19:20:05 +02002497/*
2498 * returns a pointer to the family descriptor on success, NULL otherwise.
2499 */
2500static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
Borislav Petkov395ae782010-10-01 18:38:19 +02002501{
Borislav Petkov0092b202010-10-01 19:20:05 +02002502 u8 fam = boot_cpu_data.x86;
2503 struct amd64_family_type *fam_type = NULL;
2504
2505 switch (fam) {
Borislav Petkov395ae782010-10-01 18:38:19 +02002506 case 0xf:
Borislav Petkov0092b202010-10-01 19:20:05 +02002507 fam_type = &amd64_family_types[K8_CPUS];
Borislav Petkovb8cfa022010-10-01 19:35:38 +02002508 pvt->ops = &amd64_family_types[K8_CPUS].ops;
Borislav Petkov0092b202010-10-01 19:20:05 +02002509 pvt->ctl_name = fam_type->ctl_name;
2510 pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
Borislav Petkov395ae782010-10-01 18:38:19 +02002511 break;
2512 case 0x10:
Borislav Petkov0092b202010-10-01 19:20:05 +02002513 fam_type = &amd64_family_types[F10_CPUS];
Borislav Petkovb8cfa022010-10-01 19:35:38 +02002514 pvt->ops = &amd64_family_types[F10_CPUS].ops;
Borislav Petkov0092b202010-10-01 19:20:05 +02002515 pvt->ctl_name = fam_type->ctl_name;
2516 pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
Borislav Petkov395ae782010-10-01 18:38:19 +02002517 break;
2518
2519 default:
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002520 amd64_err("Unsupported family!\n");
Borislav Petkov0092b202010-10-01 19:20:05 +02002521 return NULL;
Borislav Petkov395ae782010-10-01 18:38:19 +02002522 }
Borislav Petkov0092b202010-10-01 19:20:05 +02002523
Borislav Petkovb8cfa022010-10-01 19:35:38 +02002524 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2525
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002526 amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
Borislav Petkov0092b202010-10-01 19:20:05 +02002527 (fam == 0xf ?
Borislav Petkov24f9a7f2010-10-07 18:29:15 +02002528 (pvt->ext_model >= K8_REV_F ? "revF or later "
2529 : "revE or earlier ")
2530 : ""), pvt->mc_node_id);
Borislav Petkov0092b202010-10-01 19:20:05 +02002531 return fam_type;
Borislav Petkov395ae782010-10-01 18:38:19 +02002532}
2533
/*
 * First-stage init for one memory controller instance, driven by the
 * probe of its DRAM function (F2) PCI device: allocate the pvt, detect
 * the CPU family, reserve the sibling F1/F3 devices and check that ECC
 * is usable.  On success the pvt is parked in pvts[] for
 * amd64_init_2nd_stage() to pick up later.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the goto-cleanup error paths.
 */
static int amd64_probe_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(F2);
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	/* Grab references to the F1 and F3 siblings of this F2 device. */
	ret = -ENODEV;
	err = amd64_reserve_mc_sibling_devices(pvt, fam_type->f1_id,
					       fam_type->f3_id);
	if (err)
		goto err_free;

	ret = -EINVAL;
	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage
	 */
	pvts[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return ret;
}
2581
/*
 * This is the finishing stage of the init code. Needs to be performed after all
 * MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret = -ENODEV;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->F2->dev;
	amd64_setup_mci_misc_attributes(mci);

	/* If csrow init fails, downgrade to a controller with no ECC caps. */
	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/*
	 * Ownership of pvt now lives in the registered mci: record the mci
	 * and clear the staging slot filled by amd64_probe_one_instance().
	 */
	mcis[node_id] = mci;
	pvts[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	/* Unwind the hardware changes and references taken in stage one. */
	amd64_restore_ecc_error_reporting(pvt);

	amd64_free_mc_sibling_devices(pvt);

	/* pvts[node_id] still holds pvt here; free it and clear the slot. */
	kfree(pvts[pvt->mc_node_id]);
	pvts[node_id] = NULL;

	return ret;
}
2651
2652
/*
 * PCI probe callback: enable the F2 device and run first-stage init for
 * the memory controller instance it belongs to.
 *
 * Returns 0 on success, -EIO if the device cannot be enabled, or the
 * negative errno from amd64_probe_one_instance().
 */
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
					     const struct pci_device_id *mc_type)
{
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = amd64_probe_one_instance(pdev);
	if (ret < 0)
		amd64_err("Error probing instance: %d\n", get_node_id(pdev));

	return ret;
}
2670
/*
 * PCI remove callback: unregister the mci from the EDAC core, undo the
 * ECC/MCGCTL changes, drop the F1/F3 sibling references, detach from the
 * MCE decoder and free the instance's memory.
 */
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	amd64_free_mc_sibling_devices(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[pvt->mc_node_id] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
2698
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * inquiry this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		/* K8 (family 0xf) DRAM controller function */
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		/* Family 0x10 DRAM controller function */
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2724
/* PCI driver hooks: probe/remove one MC instance per matching F2 device. */
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
2731
2732static void amd64_setup_pci_device(void)
2733{
2734 struct mem_ctl_info *mci;
2735 struct amd64_pvt *pvt;
2736
2737 if (amd64_ctl_pci)
2738 return;
2739
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002740 mci = mcis[0];
Doug Thompson7d6034d2009-04-27 20:01:01 +02002741 if (mci) {
2742
2743 pvt = mci->pvt_info;
2744 amd64_ctl_pci =
Borislav Petkov8d5b5d92010-10-01 20:11:07 +02002745 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
Doug Thompson7d6034d2009-04-27 20:01:01 +02002746
2747 if (!amd64_ctl_pci) {
2748 pr_warning("%s(): Unable to create PCI control\n",
2749 __func__);
2750
2751 pr_warning("%s(): PCI error report via EDAC not set\n",
2752 __func__);
2753 }
2754 }
2755}
2756
2757static int __init amd64_edac_init(void)
2758{
2759 int nb, err = -ENODEV;
Borislav Petkov56b34b92009-12-21 18:13:01 +01002760 bool load_ok = false;
Doug Thompson7d6034d2009-04-27 20:01:01 +02002761
2762 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
2763
2764 opstate_init();
2765
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +02002766 if (amd_cache_northbridges() < 0)
Borislav Petkov56b34b92009-12-21 18:13:01 +01002767 goto err_ret;
Doug Thompson7d6034d2009-04-27 20:01:01 +02002768
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002769 err = -ENOMEM;
2770 pvts = kzalloc(amd_nb_num() * sizeof(pvts[0]), GFP_KERNEL);
2771 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2772 if (!(pvts && mcis))
2773 goto err_ret;
2774
Borislav Petkov50542252009-12-11 18:14:40 +01002775 msrs = msrs_alloc();
Borislav Petkov56b34b92009-12-21 18:13:01 +01002776 if (!msrs)
2777 goto err_ret;
Borislav Petkov50542252009-12-11 18:14:40 +01002778
Doug Thompson7d6034d2009-04-27 20:01:01 +02002779 err = pci_register_driver(&amd64_pci_driver);
2780 if (err)
Borislav Petkov56b34b92009-12-21 18:13:01 +01002781 goto err_pci;
Doug Thompson7d6034d2009-04-27 20:01:01 +02002782
2783 /*
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002784 * At this point, the array 'pvts[]' contains pointers to alloc'd
Doug Thompson7d6034d2009-04-27 20:01:01 +02002785 * amd64_pvt structs. These will be used in the 2nd stage init function
2786 * to finish initialization of the MC instances.
2787 */
Borislav Petkov56b34b92009-12-21 18:13:01 +01002788 err = -ENODEV;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +02002789 for (nb = 0; nb < amd_nb_num(); nb++) {
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002790 if (!pvts[nb])
Doug Thompson7d6034d2009-04-27 20:01:01 +02002791 continue;
2792
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002793 err = amd64_init_2nd_stage(pvts[nb]);
Doug Thompson7d6034d2009-04-27 20:01:01 +02002794 if (err)
Borislav Petkov37da0452009-06-10 17:36:57 +02002795 goto err_2nd_stage;
Borislav Petkov56b34b92009-12-21 18:13:01 +01002796
2797 load_ok = true;
Doug Thompson7d6034d2009-04-27 20:01:01 +02002798 }
2799
Borislav Petkov56b34b92009-12-21 18:13:01 +01002800 if (load_ok) {
2801 amd64_setup_pci_device();
2802 return 0;
2803 }
Doug Thompson7d6034d2009-04-27 20:01:01 +02002804
Borislav Petkov37da0452009-06-10 17:36:57 +02002805err_2nd_stage:
Doug Thompson7d6034d2009-04-27 20:01:01 +02002806 pci_unregister_driver(&amd64_pci_driver);
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002807
Borislav Petkov56b34b92009-12-21 18:13:01 +01002808err_pci:
2809 msrs_free(msrs);
2810 msrs = NULL;
Borislav Petkovcc4d8862010-10-13 16:11:59 +02002811
Borislav Petkov56b34b92009-12-21 18:13:01 +01002812err_ret:
Doug Thompson7d6034d2009-04-27 20:01:01 +02002813 return err;
2814}
2815
/*
 * Module exit: tear down the global PCI control device, unregister the
 * driver (removing every instance), and free the staging arrays and the
 * per-cpu MSR buffer allocated in amd64_edac_init().
 */
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(mcis);
	mcis = NULL;

	kfree(pvts);
	pvts = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
2832
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
		"Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		EDAC_AMD64_VERSION);

/* Read-only knob selecting the EDAC core's error-gathering mode. */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");