/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64 base;
	u64 limit;
	u8 enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT		24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
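
/*
 * Worked example (editor's illustration): GET_BITFIELD(0x12345678, 8, 15)
 * masks bits 8..15 with GENMASK_ULL(15, 8) and shifts them down, giving
 * 0x56. U64_LSHIFT() is just a left shift with an explicit widening cast.
 */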

#ifdef CONFIG_X86_INTEL_SBI_APL
#include "linux/platform_data/sbi_apl.h"
static int sbi_send(int port, int off, int op, u32 *data)
{
	struct sbi_apl_message sbi_arg;
	int ret, read = 0;

	memset(&sbi_arg, 0, sizeof(sbi_arg));

	if (op == 0 || op == 4 || op == 6)
		read = 1;
	else
		sbi_arg.data = *data;

	sbi_arg.opcode = op;
	sbi_arg.port_address = port;
	sbi_arg.register_offset = off;
	ret = sbi_apl_commit(&sbi_arg);
	if (ret || sbi_arg.status)
		edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
			 sbi_arg.status, ret, sbi_arg.data);

	if (ret == 0)
		ret = sbi_arg.status;

	if (ret == 0 && read)
		*data = sbi_arg.data;

	return ret;
}
#else
static int sbi_send(int port, int off, int op, u32 *data)
{
	return -EUNATCH;
}
#endif

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= sbi_send(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

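/*
 * For reference, RD_REG(&tolud, b_cr_tolud_pci) expands to:
 *
 *	ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *		    b_cr_tolud_pci_r_opcode, &tolud,
 *		    sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 *
 * i.e. the port, offset and read opcode are token-pasted from the register
 * name and are expected to match the per-register constants in pnd2_edac.h.
 */
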
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}

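/*
 * Worked example (illustrative values): base = 0x3000000 with
 * mask = GENMASK_ULL(39, 24) passes all three sanity checks above and
 * yields limit = 0x3ffffff, i.e. the naturally aligned 16MB region
 * [0x3000000, 0x3ffffff].
 */
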
static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

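/*
 * Example (hypothetical register values): asym_2way_intlv_mode == 0
 * selects channels 0 and 2 (mask 0x5) from the interleave table; if
 * slice 1 is disabled the mask is then trimmed to 0x1.
 */
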
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			return -ENODEV;

	return 0;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
		       (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}

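/*
 * Example: with top_lm (TOLUD) at 2GB, a system address of 5GB sits
 * above the 32-bit MMIO hole and maps to the contiguous address
 * 5GB - (4GB - 2GB) = 3GB.
 */
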
/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}

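/*
 * Worked example: removing bit 2 from *addr = 0b1101 keeps bits 1:0
 * (0b01) and shifts the bits above bit 2 down one place, giving 0b101.
 */
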
/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}

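/*
 * Example: hash_by_mask(0x6, 0x3) masks to 0x2 and folds the selected
 * bits together, 1 ^ 0 = 1, i.e. it returns the parity of the bits
 * picked out by the mask.
 */
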
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};

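/*
 * How to read the tables above: bits[i] names the DRAM address bit that
 * PMI address bit i feeds. In the first entry, for example, bits[0] == C(2)
 * means PMI bit 0 drives column bit 2 and bits[5] == B(0) drives bank
 * bit 0; RS marks the rank select bit. Trailing zeros are unused PMI bits.
 */
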
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}

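/*
 * Example: rank_hash() XORs PMI address bits 16 and 10, so for
 * pmiaddr = 0x10400 (both bits set) the hash is 0, while 0x10000
 * (only bit 16 set) hashes to 1.
 */
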
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

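/* Example: dnv_get_bit(0x40, 6, 2) plucks bit 6 (set) and returns it at bit 2, i.e. 4. */
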
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
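	/*
	 * For example, errcode 0x0091 (0000 0000 1001 0001) passes the
	 * check below: bit 7 is set, mmm = 001 (a memory read error) and
	 * cccc = 1 (channel 1).
	 */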
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = "pnd2_edac.c";
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */

static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
	.name			= "pnd2/apl",
	.type			= APL,
	.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift		= 0,
	.channels		= APL_NUM_CHANNELS,
	.dimms_per_channel	= 1,
	.rd_reg			= apl_rd_reg,
	.get_registers		= apl_get_registers,
	.check_ecc		= apl_check_ecc_active,
	.mk_region		= apl_mk_region,
	.get_dimm_config	= apl_get_dimm_config,
	.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name			= "pnd2/dnv",
	.type			= DNV,
	.pmiaddr_shift		= 0,
	.pmiidx_shift		= 1,
	.channels		= DNV_NUM_CHANNELS,
	.dimms_per_channel	= 2,
	.rd_reg			= dnv_rd_reg,
	.get_registers		= dnv_get_registers,
	.check_ecc		= dnv_check_ecc_active,
	.mk_region		= dnv_mk_region,
	.get_dimm_config	= dnv_get_dimm_config,
	.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
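
/*
 * Example (illustrative): loading the module with
 * "modprobe pnd2_edac edac_op_state=0" selects polled error reporting,
 * per the parameter description above.
 */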

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");