/*
 * Intel 5100 Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This module is based on the following document:
 *
 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://download.intel.com/design/chipsets/datashts/318378.pdf
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>

#include "edac_core.h"

/* register addresses and bit field accessors... */

/* device 16, func 1 */
#define I5100_MS		0x44	/* Memory Status Register */
#define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
#define I5100_SPDDATA_RDO(a)	((a) >> 15 & 1)
#define I5100_SPDDATA_SBE(a)	((a) >> 13 & 1)
#define I5100_SPDDATA_BUSY(a)	((a) >> 12 & 1)
#define I5100_SPDDATA_DATA(a)	((a) & ((1 << 8) - 1))
#define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
#define I5100_SPDCMD_DTI(a)	(((a) & ((1 << 4) - 1)) << 28)
#define I5100_SPDCMD_CKOVRD(a)	(((a) & 1) << 27)
#define I5100_SPDCMD_SA(a)	(((a) & ((1 << 3) - 1)) << 24)
#define I5100_SPDCMD_BA(a)	(((a) & ((1 << 8) - 1)) << 16)
#define I5100_SPDCMD_DATA(a)	(((a) & ((1 << 8) - 1)) << 8)
#define I5100_SPDCMD_CMD(a)	((a) & 1)
#define I5100_TOLM		0x6c	/* Top of Low Memory */
#define I5100_TOLM_TOLM(a)	((a) >> 12 & ((1 << 4) - 1))
#define I5100_MIR0		0x80	/* Memory Interleave Range 0 */
#define I5100_MIR1		0x84	/* Memory Interleave Range 1 */
#define I5100_AMIR_0		0x8c	/* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1		0x90	/* Adjusted Memory Interleave Range 1 */
#define I5100_MIR_LIMIT(a)	((a) >> 4 & ((1 << 12) - 1))
#define I5100_MIR_WAY1(a)	((a) >> 1 & 1)
#define I5100_MIR_WAY0(a)	((a) & 1)
#define I5100_FERR_NF_MEM	0xa0	/* MC First Non Fatal Errors */
#define I5100_FERR_NF_MEM_CHAN_INDX(a)	((a) >> 28 & 1)
#define I5100_FERR_NF_MEM_SPD_MASK	(1 << 18)
#define I5100_FERR_NF_MEM_M16ERR_MASK	(1 << 16)
#define I5100_FERR_NF_MEM_M15ERR_MASK	(1 << 15)
#define I5100_FERR_NF_MEM_M14ERR_MASK	(1 << 14)
#define I5100_FERR_NF_MEM_M12ERR_MASK	(1 << 12)
#define I5100_FERR_NF_MEM_M11ERR_MASK	(1 << 11)
#define I5100_FERR_NF_MEM_M10ERR_MASK	(1 << 10)
#define I5100_FERR_NF_MEM_M6ERR_MASK	(1 << 6)
#define I5100_FERR_NF_MEM_M5ERR_MASK	(1 << 5)
#define I5100_FERR_NF_MEM_M4ERR_MASK	(1 << 4)
#define I5100_FERR_NF_MEM_M1ERR_MASK	1
#define I5100_FERR_NF_MEM_ANY_MASK	\
	(I5100_FERR_NF_MEM_M16ERR_MASK | \
	 I5100_FERR_NF_MEM_M15ERR_MASK | \
	 I5100_FERR_NF_MEM_M14ERR_MASK | \
	 I5100_FERR_NF_MEM_M12ERR_MASK | \
	 I5100_FERR_NF_MEM_M11ERR_MASK | \
	 I5100_FERR_NF_MEM_M10ERR_MASK | \
	 I5100_FERR_NF_MEM_M6ERR_MASK | \
	 I5100_FERR_NF_MEM_M5ERR_MASK | \
	 I5100_FERR_NF_MEM_M4ERR_MASK | \
	 I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_FERR_NF_MEM_ANY(a)	((a) & I5100_FERR_NF_MEM_ANY_MASK)
#define I5100_NERR_NF_MEM	0xa4	/* MC Next Non-Fatal Errors */
#define I5100_NERR_NF_MEM_ANY(a)	I5100_FERR_NF_MEM_ANY(a)

/* device 21 and 22, func 0 */
#define I5100_MTR_0	0x154	/* Memory Technology Registers 0-3 */
#define I5100_DMIR	0x15c	/* DIMM Interleave Range */
#define I5100_DMIR_LIMIT(a)	((a) >> 16 & ((1 << 11) - 1))
#define I5100_DMIR_RANK(a, i)	((a) >> (4 * i) & ((1 << 2) - 1))
#define I5100_MTR_4	0x1b0	/* Memory Technology Registers 4,5 */
#define I5100_MTR_PRESENT(a)	((a) >> 10 & 1)
#define I5100_MTR_ETHROTTLE(a)	((a) >> 9 & 1)
#define I5100_MTR_WIDTH(a)	((a) >> 8 & 1)
#define I5100_MTR_NUMBANK(a)	((a) >> 6 & 1)
#define I5100_MTR_NUMROW(a)	((a) >> 2 & ((1 << 2) - 1))
#define I5100_MTR_NUMCOL(a)	((a) & ((1 << 2) - 1))
#define I5100_VALIDLOG	0x18c	/* Valid Log Markers */
#define I5100_VALIDLOG_REDMEMVALID(a)	((a) >> 2 & 1)
#define I5100_VALIDLOG_RECMEMVALID(a)	((a) >> 1 & 1)
#define I5100_VALIDLOG_NRECMEMVALID(a)	((a) & 1)
#define I5100_NRECMEMA	0x190	/* Non-Recoverable Memory Error Log Reg A */
#define I5100_NRECMEMA_MERR(a)		((a) >> 15 & ((1 << 5) - 1))
#define I5100_NRECMEMA_BANK(a)		((a) >> 12 & ((1 << 3) - 1))
#define I5100_NRECMEMA_RANK(a)		((a) >> 8 & ((1 << 3) - 1))
#define I5100_NRECMEMA_DM_BUF_ID(a)	((a) & ((1 << 8) - 1))
#define I5100_NRECMEMB	0x194	/* Non-Recoverable Memory Error Log Reg B */
#define I5100_NRECMEMB_CAS(a)		((a) >> 16 & ((1 << 13) - 1))
#define I5100_NRECMEMB_RAS(a)		((a) & ((1 << 16) - 1))
#define I5100_REDMEMA	0x198	/* Recoverable Memory Data Error Log Reg A */
#define I5100_REDMEMA_SYNDROME(a)	(a)
#define I5100_REDMEMB	0x19c	/* Recoverable Memory Data Error Log Reg B */
#define I5100_REDMEMB_ECC_LOCATOR(a)	((a) & ((1 << 18) - 1))
#define I5100_RECMEMA	0x1a0	/* Recoverable Memory Error Log Reg A */
#define I5100_RECMEMA_MERR(a)		I5100_NRECMEMA_MERR(a)
#define I5100_RECMEMA_BANK(a)		I5100_NRECMEMA_BANK(a)
#define I5100_RECMEMA_RANK(a)		I5100_NRECMEMA_RANK(a)
#define I5100_RECMEMA_DM_BUF_ID(a)	I5100_NRECMEMA_DM_BUF_ID(a)
#define I5100_RECMEMB	0x1a4	/* Recoverable Memory Error Log Reg B */
#define I5100_RECMEMB_CAS(a)		I5100_NRECMEMB_CAS(a)
#define I5100_RECMEMB_RAS(a)		I5100_NRECMEMB_RAS(a)

/* some generic limits */
#define I5100_MAX_RANKS_PER_CTLR	6
#define I5100_MAX_CTLRS			2
#define I5100_MAX_RANKS_PER_DIMM	4
#define I5100_DIMM_ADDR_LINES		(6 - 3)	/* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CTLR	4
#define I5100_MAX_RANK_INTERLEAVE	4
#define I5100_MAX_DMIRS			5

struct i5100_priv {
	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
	int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];

	/*
	 * mainboard chip select map -- maps i5100 chip selects to
	 * DIMM slot chip selects.  In the case of only 4 ranks per
	 * controller, the mapping is fairly obvious but not unique.
	 * we map -1 -> NC and assume both controllers use the same
	 * map...
	 *
	 */
	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];

	/* memory interleave range */
	struct {
		u64	 limit;
		unsigned way[2];
	} mir[I5100_MAX_CTLRS];

	/* adjusted memory interleave range register */
	unsigned amir[I5100_MAX_CTLRS];

	/* dimm interleave range */
	struct {
		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
		u64	 limit;
	} dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];

	/* memory technology registers... */
	struct {
		unsigned present;	/* 0 or 1 */
		unsigned ethrottle;	/* 0 or 1 */
		unsigned width;		/* 4 or 8 bits  */
		unsigned numbank;	/* 2 or 3 lines */
		unsigned numrow;	/* 13 .. 16 lines */
		unsigned numcol;	/* 11 .. 12 lines */
	} mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];

	u64 tolm;		/* top of low memory in bytes */
	unsigned ranksperctlr;	/* number of ranks per controller */

	struct pci_dev *mc;	/* device 16 func 1 */
	struct pci_dev *ch0mm;	/* device 21 func 0 */
	struct pci_dev *ch1mm;	/* device 22 func 0 */
};

/* map a rank/ctlr to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
			      int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;
		const int numrank = priv->dimm_numrank[ctlr][i];

		for (j = 0; j < numrank; j++)
			if (priv->dimm_csmap[i][j] == rank)
				return i * 2 + ctlr;
	}

	return -1;
}

/*
 * The processor bus memory addresses are broken into three
 * pieces, whereas the controller addresses are contiguous.
 *
 * here we map from the controller address space to the
 * processor address space:
 *
 *   Processor Address Space
 * +-----------------------------+
 * |                             |
 * |  "high" memory addresses    |
 * |                             |
 * +-----------------------------+ <- 4GB on the i5100
 * |                             |
 * | other non-memory addresses  |
 * |                             |
 * +-----------------------------+ <- top of low memory
 * |                             |
 * |  "low" memory addresses     |
 * |                             |
 * +-----------------------------+
 */
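/*
 * e.g. with the top of low memory at 3 GiB, controller address
 * 0xc0000000 maps to processor address 0x100000000 -- everything at or
 * above tolm is pushed up past the 4 GiB non-memory hole.
 */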
static unsigned long i5100_ctl_page_to_phys(struct mem_ctl_info *mci,
					    unsigned long cntlr_addr)
{
	const struct i5100_priv *priv = mci->pvt_info;

	if (cntlr_addr < priv->tolm)
		return cntlr_addr;

	return (1ULL << 32) + (cntlr_addr - priv->tolm);
}

static const char *i5100_err_msg(unsigned err)
{
	const char *merrs[] = {
		"unknown",	/* 0 */
		"uncorrectable data ECC on replay",	/* 1 */
		"unknown",	/* 2 */
		"unknown",	/* 3 */
		"aliased uncorrectable demand data ECC",	/* 4 */
		"aliased uncorrectable spare-copy data ECC",	/* 5 */
		"aliased uncorrectable patrol data ECC",	/* 6 */
		"unknown",	/* 7 */
		"unknown",	/* 8 */
		"unknown",	/* 9 */
		"non-aliased uncorrectable demand data ECC",	/* 10 */
		"non-aliased uncorrectable spare-copy data ECC",	/* 11 */
		"non-aliased uncorrectable patrol data ECC",	/* 12 */
		"unknown",	/* 13 */
		"correctable demand data ECC",	/* 14 */
		"correctable spare-copy data ECC",	/* 15 */
		"correctable patrol data ECC",	/* 16 */
		"unknown",	/* 17 */
		"SPD protocol error",	/* 18 */
		"unknown",	/* 19 */
		"spare copy initiated",	/* 20 */
		"spare copy completed",	/* 21 */
	};
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(merrs); i++)
		if (1 << i & err)
			return merrs[i];

	return "none";
}

/* convert csrow index into a rank (per controller -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow % priv->ranksperctlr;
}

/* convert csrow index into a controller (0..1) */
static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow / priv->ranksperctlr;
}

static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
				    int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return ctlr * priv->ranksperctlr + rank;
}
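
/*
 * i.e. csrows are numbered controller-major: with 4 ranks per controller,
 * csrows 0-3 are the ranks of controller 0 and csrows 4-7 those of
 * controller 1.
 */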

static void i5100_handle_ce(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[0].ce_count++;
}

static void i5100_handle_ue(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;
}

static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
			   u32 ferr, u32 nerr)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
	u32 dw;
	u32 dw2;
	unsigned syndrome = 0;
	unsigned ecc_loc = 0;
	unsigned merr;
	unsigned bank;
	unsigned rank;
	unsigned cas;
	unsigned ras;

	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

	if (I5100_VALIDLOG_REDMEMVALID(dw)) {
		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
		syndrome = I5100_REDMEMA_SYNDROME(dw2);
		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
		ecc_loc = I5100_REDMEMB_ECC_LOCATOR(dw2);
	}

	if (I5100_VALIDLOG_RECMEMVALID(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
		merr = I5100_RECMEMA_MERR(dw2);
		bank = I5100_RECMEMA_BANK(dw2);
		rank = I5100_RECMEMA_RANK(dw2);

		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
		cas = I5100_RECMEMB_CAS(dw2);
		ras = I5100_RECMEMB_RAS(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	if (I5100_VALIDLOG_NRECMEMVALID(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
		merr = I5100_NRECMEMA_MERR(dw2);
		bank = I5100_NRECMEMA_BANK(dw2);
		rank = I5100_NRECMEMA_RANK(dw2);

		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
		cas = I5100_NRECMEMB_CAS(dw2);
		ras = I5100_NRECMEMB_RAS(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}

static void i5100_check_error(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw;

	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
	if (I5100_FERR_NF_MEM_ANY(dw)) {
		u32 dw2;

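		/*
		 * pick up the "next" logged error as well, then clear both
		 * status registers by writing back the bits that were set
		 */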
		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
		if (dw2)
			pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
					       dw2);
		pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);

		i5100_read_log(mci, I5100_FERR_NF_MEM_CHAN_INDX(dw),
			       I5100_FERR_NF_MEM_ANY(dw),
			       I5100_NERR_NF_MEM_ANY(dw2));
	}
}

static struct pci_dev *pci_get_device_func(unsigned vendor,
					   unsigned device,
					   unsigned func)
{
	struct pci_dev *ret = NULL;

	while (1) {
		ret = pci_get_device(vendor, device, ret);

		if (!ret)
			break;

		if (PCI_FUNC(ret->devfn) == func)
			break;
	}

	return ret;
}

static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
					    int csrow)
{
	struct i5100_priv *priv = mci->pvt_info;
	const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
	const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
	unsigned addr_lines;

	/* dimm present? */
	if (!priv->mtr[ctlr][ctlr_rank].present)
		return 0ULL;

	addr_lines =
		I5100_DIMM_ADDR_LINES +
		priv->mtr[ctlr][ctlr_rank].numcol +
		priv->mtr[ctlr][ctlr_rank].numrow +
		priv->mtr[ctlr][ctlr_rank].numbank;

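	/*
	 * e.g. a x8 rank with 2 bank, 14 row and 10 column address lines
	 * gives 3 + 10 + 14 + 2 = 29 lines, i.e. a 512 MiB rank (131072
	 * pages with a 4 KiB PAGE_SIZE).
	 */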
	return (unsigned long)
		((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}

static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;
		struct pci_dev *pdev = mms[i];

		for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
			const unsigned addr =
				(j < 4) ? I5100_MTR_0 + j * 2 :
					  I5100_MTR_4 + (j - 4) * 2;
			u16 w;

			pci_read_config_word(pdev, addr, &w);

			priv->mtr[i][j].present = I5100_MTR_PRESENT(w);
			priv->mtr[i][j].ethrottle = I5100_MTR_ETHROTTLE(w);
			priv->mtr[i][j].width = 4 + 4 * I5100_MTR_WIDTH(w);
			priv->mtr[i][j].numbank = 2 + I5100_MTR_NUMBANK(w);
			priv->mtr[i][j].numrow = 13 + I5100_MTR_NUMROW(w);
			priv->mtr[i][j].numcol = 10 + I5100_MTR_NUMCOL(w);
		}
	}
}

/*
 * FIXME: make this into a real i2c adapter (so that dimm-decode
 * will work)?
 */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
			       u8 ch, u8 slot, u8 addr, u8 *byte)
{
	struct i5100_priv *priv = mci->pvt_info;
	u16 w;
	u32 dw;
	unsigned long et;

	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
	if (I5100_SPDDATA_BUSY(w))
		return -1;

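	/*
	 * build the SPD read command: DTI 0xa selects the serial presence
	 * detect EEPROMs, SA addresses the DIMM (4 slots per channel), BA
	 * is the byte offset within the EEPROM and CMD 0 requests a read
	 */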
	dw =	I5100_SPDCMD_DTI(0xa) |
		I5100_SPDCMD_CKOVRD(1) |
		I5100_SPDCMD_SA(ch * 4 + slot) |
		I5100_SPDCMD_BA(addr) |
		I5100_SPDCMD_DATA(0) |
		I5100_SPDCMD_CMD(0);
	pci_write_config_dword(priv->mc, I5100_SPDCMD, dw);

	/* wait up to 100ms */
	et = jiffies + HZ / 10;
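	/* FIXME: 'et' is the intended 100ms deadline but the loop below never checks it */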
	udelay(100);
	while (1) {
		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
		if (!I5100_SPDDATA_BUSY(w))
			break;
		udelay(100);
	}

	if (!I5100_SPDDATA_RDO(w) || I5100_SPDDATA_SBE(w))
		return -1;

	*byte = I5100_SPDDATA_DATA(w);

	return 0;
}

/*
 * fill dimm chip select map
 *
 * FIXME:
 *   o only valid for 4 ranks per controller
 *   o not the only way to map chip selects to dimm slots
 *   o investigate if there is some way to obtain this map from the bios
 */
static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	WARN_ON(priv->ranksperctlr != 4);

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;

		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
			priv->dimm_csmap[i][j] = -1; /* default NC */
	}

	/* only 2 chip selects per slot... */
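	/* (e.g. a dual-rank DIMM in slot 0 responds on chip selects 0 and 3) */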
	priv->dimm_csmap[0][0] = 0;
	priv->dimm_csmap[0][1] = 3;
	priv->dimm_csmap[1][0] = 1;
	priv->dimm_csmap[1][1] = 2;
	priv->dimm_csmap[2][0] = 2;
	priv->dimm_csmap[3][0] = 3;
}

static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
					     struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
			u8 rank;

			if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
				priv->dimm_numrank[i][j] = 0;
			else
				priv->dimm_numrank[i][j] = (rank & 3) + 1;
		}
	}

	i5100_init_dimm_csmap(mci);
}

static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
					      struct mem_ctl_info *mci)
{
	u16 w;
	u32 dw;
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

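	/*
	 * TOLM and the MIR/DMIR limit fields are all kept by the hardware
	 * in 256 MiB granules; convert them to byte addresses here
	 */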
	pci_read_config_word(pdev, I5100_TOLM, &w);
	priv->tolm = (u64) I5100_TOLM_TOLM(w) * 256 * 1024 * 1024;

	pci_read_config_word(pdev, I5100_MIR0, &w);
	priv->mir[0].limit = (u64) I5100_MIR_LIMIT(w) << 28;
	priv->mir[0].way[1] = I5100_MIR_WAY1(w);
	priv->mir[0].way[0] = I5100_MIR_WAY0(w);

	pci_read_config_word(pdev, I5100_MIR1, &w);
	priv->mir[1].limit = (u64) I5100_MIR_LIMIT(w) << 28;
	priv->mir[1].way[1] = I5100_MIR_WAY1(w);
	priv->mir[1].way[0] = I5100_MIR_WAY0(w);

	pci_read_config_word(pdev, I5100_AMIR_0, &w);
	priv->amir[0] = w;
	pci_read_config_word(pdev, I5100_AMIR_1, &w);
	priv->amir[1] = w;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < 5; j++) {
			int k;

			pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

			priv->dmir[i][j].limit =
				(u64) I5100_DMIR_LIMIT(dw) << 28;
			for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
				priv->dmir[i][j].rank[k] =
					I5100_DMIR_RANK(dw, k);
		}
	}

	i5100_init_mtr(mci);
}

static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
	int i;
	unsigned long total_pages = 0UL;
	struct i5100_priv *priv = mci->pvt_info;

	for (i = 0; i < mci->nr_csrows; i++) {
		const unsigned long npages = i5100_npages(mci, i);
		const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
		const unsigned rank = i5100_csrow_to_rank(mci, i);

		if (!npages)
			continue;

		/*
		 * FIXME: these two are totally bogus -- I don't see how to
		 * map them correctly to this structure...
		 */
		mci->csrows[i].first_page = total_pages;
		mci->csrows[i].last_page = total_pages + npages - 1;
		mci->csrows[i].page_mask = 0UL;

		mci->csrows[i].nr_pages = npages;
		mci->csrows[i].grain = 32;
		mci->csrows[i].csrow_idx = i;
		mci->csrows[i].dtype =
			(priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
		mci->csrows[i].ue_count = 0;
		mci->csrows[i].ce_count = 0;
		mci->csrows[i].mtype = MEM_RDDR2;
		mci->csrows[i].edac_mode = EDAC_SECDED;
		mci->csrows[i].mci = mci;
		mci->csrows[i].nr_channels = 1;
		mci->csrows[i].channels[0].chan_idx = 0;
		mci->csrows[i].channels[0].ce_count = 0;
		mci->csrows[i].channels[0].csrow = mci->csrows + i;
		snprintf(mci->csrows[i].channels[0].label,
			 sizeof(mci->csrows[i].channels[0].label),
			 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));

		total_pages += npages;
	}
}

static int __devinit i5100_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int rc;
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;
	struct pci_dev *ch0mm, *ch1mm;
	int ret = 0;
	u32 dw;
	int ranksperch;

	if (PCI_FUNC(pdev->devfn) != 1)
		return -ENODEV;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		ret = rc;
		goto bail;
	}

	/* figure out how many ranks, from strapped state of 48GB_Mode input */
	pci_read_config_dword(pdev, I5100_MS, &dw);
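	/* bit 8 (the 48GB_Mode strap) set means 6 ranks per channel, clear means 4 */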
	ranksperch = !!(dw & (1 << 8)) * 2 + 4;

	if (ranksperch != 4) {
		/* FIXME: get 6 ranks / controller to work - need hw... */
		printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
		ret = -ENODEV;
		goto bail;
	}

	/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_21, 0);
	if (!ch0mm)
		return -ENODEV;

	rc = pci_enable_device(ch0mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch0;
	}

	/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_22, 0);
	if (!ch1mm) {
		ret = -ENODEV;
		goto bail_ch0;
	}

	rc = pci_enable_device(ch1mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch1;
	}

	mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
	if (!mci) {
		ret = -ENOMEM;
		goto bail_ch1;
	}

	mci->dev = &pdev->dev;

	priv = mci->pvt_info;
	priv->ranksperctlr = ranksperch;
	priv->mc = pdev;
	priv->ch0mm = ch0mm;
	priv->ch1mm = ch1mm;

	i5100_init_dimm_layout(pdev, mci);
	i5100_init_interleaving(pdev, mci);

	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "i5100_edac.c";
	mci->mod_ver = "not versioned";
	mci->ctl_name = "i5100";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = i5100_ctl_page_to_phys;

	mci->edac_check = i5100_check_error;

	i5100_init_csrows(mci);

	/* this strange construction seems to be in every driver, dunno why */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	if (edac_mc_add_mc(mci)) {
		ret = -ENODEV;
		goto bail_mc;
	}

	goto bail;

bail_mc:
	edac_mc_free(mci);

bail_ch1:
	pci_dev_put(ch1mm);

bail_ch0:
	pci_dev_put(ch0mm);

bail:
	return ret;
}

static void __devexit i5100_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;

	mci = edac_mc_del_mc(&pdev->dev);

	if (!mci)
		return;

	priv = mci->pvt_info;
	pci_dev_put(priv->ch0mm);
	pci_dev_put(priv->ch1mm);

	edac_mc_free(mci);
}

static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
	/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);

static struct pci_driver i5100_driver = {
	.name = KBUILD_BASENAME,
	.probe = i5100_init_one,
	.remove = __devexit_p(i5100_remove_one),
	.id_table = i5100_pci_tbl,
};

static int __init i5100_init(void)
{
	int pci_rc;

	pci_rc = pci_register_driver(&i5100_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

static void __exit i5100_exit(void)
{
	pci_unregister_driver(&i5100_driver);
}

module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");