/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two-stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64 base;
	u64 limit;
	u8 enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE 256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...) \
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH 31
#define PND_MAX_PHYS_BIT 39

#define APL_ASYMSHIFT 28
#define DNV_ASYMSHIFT 31
#define CH_HASH_MASK_LSB 6
#define SLICE_HASH_MASK_LSB 6
#define MOT_SLC_INTLV_BIT 12
#define LOG2_PMI_ADDR_GRANULARITY 5
#define MOT_SHIFT 24
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s) ((u64)(val) << (s))

/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus *p2sb_bus;
#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF 0xd0
#define P2SB_DATA_OFF 0xd4
#define P2SB_STAT_OFF 0xd8
#define P2SB_ROUT_OFF 0xda
#define P2SB_EADD_OFF 0xdc
#define P2SB_HIDE_OFF 0xe1

#define P2SB_BUSY 1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

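/*
 * One register read via the P2SB mailbox: unhide the device, make sure
 * no transaction is pending, post the target (port/offset) and opcode,
 * poll until the busy bit clears, then fetch the data and re-hide the
 * device. The two status bits above the busy bit carry the completion
 * code that is returned to the caller.
 */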
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;

	P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	P2SB_WRITE(byte, P2SB_HIDE_OFF, 1);

	return ret;
}

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;
	u8 hidden;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		/* Unhide the P2SB device, if it's hidden */
		pci_read_config_byte(pdev, 0xe1, &hidden);
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, 0);

		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		lo &= 0xfffffff0;

		/* Hide the P2SB device, if it was hidden before */
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, hidden);

		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

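/*
 * Denverton has no P2SB mailbox, so registers are reached one of three
 * ways: opcode 4 is a direct PCI config read from device 0x1980; opcode 0
 * on port 0x4c is MMIO relative to the memory controller hub base; all
 * other ports are MMIO relative to the sideband register base, with the
 * port number selecting a 64KB window.
 */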
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port) \
	ops->rd_reg(port, \
		regname##_offset, \
		regname##_r_opcode, \
		regp, sizeof(struct regname), \
		#regname)

#define RD_REG(regp, regname) \
	ops->rd_reg(regname ## _port, \
		regname##_offset, \
		regname##_r_opcode, \
		regp, sizeof(struct regname), \
		#regname)
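/*
 * Convenience wrappers around ops->rd_reg(). For example,
 * RD_REG(&tolud, b_cr_tolud_pci) reads sizeof(struct b_cr_tolud_pci)
 * bytes using the b_cr_tolud_pci_port/_offset/_r_opcode constants that
 * accompany the register layouts in pnd2_edac.h.
 */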

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

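/*
 * Build a bitmap of the PMI channels enabled for symmetric accesses:
 * bits 0/1 are slice 0 channels 0/1, bits 2/3 are slice 1 channels 0/1.
 */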
static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

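	/*
	 * Pick the system address bit used as the slice and/or channel
	 * selector: hvm_mode uses fixed bits 29/30, otherwise the bit
	 * comes from the interleave_mode lookup table above.
	 */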
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}

/* Squeeze out one address bit, shift upper part down to fill gap */
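/*
 * For example, with bitidx = 2 an addr of 0b101100 becomes 0b10100:
 * bit 2 drops out and the higher bits shift down one place.
 */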
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}

/* XOR all the bits from addr specified in mask */
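/* (i.e. the parity of the bits selected by mask) */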
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

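/*
 * Each entry describes one supported DIMM geometry. bits[i] names the
 * DRAM address bit that PMI address bit "i" feeds: C(n)/B(n)/R(n) for
 * column/bank/row bit n, RS for the rank select, 0 for unused upper bits.
 */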
static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};

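/*
 * Bank and rank hashing (enabled by the bahen/rsien bits in d_cr_drp0):
 * selected PMI address bits are XORed into the decoded bank/rank bits.
 * The "shft" argument is the addrdec value, which moves the tapped bits
 * up for the wider 2KB/4KB address maps.
 */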
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
			    HW_EVENT_ERR_CORRECTED;
	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic.
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
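		/*
		 * capacity counts 64-bit (8-byte) words: ranks * 8 banks *
		 * rows * columns. Shifting by (20 - 3) folds in the 8 bytes
		 * per word to give MiB.
		 */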
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = "pnd2_edac.c";
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
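/*
 * A typical session (assuming debugfs is mounted at /sys/kernel/debug):
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */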
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");