/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (c) 2013 by Cisco Systems, Inc.
 * All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/ctype.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>

#include "edac_module.h"

#define OCTEON_MAX_MC 4

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

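/*
 * Per-controller private state.  These fields hold the error-injection
 * parameters exposed through sysfs; when "inject" is set, the OCTEON II
 * poll routine reports a fake error built from them instead of reading
 * the LMC registers.
 */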
struct octeon_lmc_pvt {
	unsigned long inject;
	unsigned long error_type;
	unsigned long dimm;
	unsigned long rank;
	unsigned long bank;
	unsigned long row;
	unsigned long col;
};

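/*
 * Poll one LMC on OCTEON/OCTEON Plus: report single-bit (corrected) and
 * double-bit (uncorrected) ECC errors and clear the status bits to re-arm.
 */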
static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
{
	union cvmx_lmcx_mem_cfg0 cfg0;
	bool do_clear = false;
	char msg[64];

	cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
	if (cfg0.s.sec_err || cfg0.s.ded_err) {
		union cvmx_lmcx_fadr fadr;
		fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
		snprintf(msg, sizeof(msg),
			 "DIMM %d rank %d bank %d row %d col %d",
			 fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
			 fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
	}

	if (cfg0.s.sec_err) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		cfg0.s.sec_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (cfg0.s.ded_err) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		cfg0.s.ded_err = -1;	/* Done, re-arm */
		do_clear = true;
	}
	if (do_clear)
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
}

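/*
 * Poll one LMC on OCTEON II.  Same as above, but honours the sysfs
 * error-injection controls: when armed, a fake error is reported from the
 * stored injection parameters instead of the hardware registers.
 */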
static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
{
	struct octeon_lmc_pvt *pvt = mci->pvt_info;
	union cvmx_lmcx_int int_reg;
	bool do_clear = false;
	char msg[64];

	if (!pvt->inject)
		int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
	else {
		int_reg.u64 = 0;	/* don't act on stale bits when injecting */
		if (pvt->error_type == 1)
			int_reg.s.sec_err = 1;
		if (pvt->error_type == 2)
			int_reg.s.ded_err = 1;
	}

	if (int_reg.s.sec_err || int_reg.s.ded_err) {
		union cvmx_lmcx_fadr fadr;
		if (likely(!pvt->inject))
			fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
		else {
			fadr.cn61xx.fdimm = pvt->dimm;
			fadr.cn61xx.fbunk = pvt->rank;
			fadr.cn61xx.fbank = pvt->bank;
			fadr.cn61xx.frow = pvt->row;
			fadr.cn61xx.fcol = pvt->col;
		}
		snprintf(msg, sizeof(msg),
			 "DIMM %d rank %d bank %d row %d col %d",
			 fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
			 fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
	}

	if (int_reg.s.sec_err) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		int_reg.s.sec_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (int_reg.s.ded_err) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		int_reg.s.ded_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (do_clear) {
		if (likely(!pvt->inject))
			cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
		else
			pvt->inject = 0;
	}
}

/************************ MC SYSFS parts ***********************************/

/* Only a couple naming differences per template, so very similar */
#define TEMPLATE_SHOW(reg)						\
static ssize_t octeon_mc_inject_##reg##_show(struct device *dev,	\
			       struct device_attribute *attr,		\
			       char *data)				\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct octeon_lmc_pvt *pvt = mci->pvt_info;			\
	return sprintf(data, "%016llu\n", (u64)pvt->reg);		\
}

#define TEMPLATE_STORE(reg)						\
static ssize_t octeon_mc_inject_##reg##_store(struct device *dev,	\
			       struct device_attribute *attr,		\
			       const char *data, size_t count)		\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct octeon_lmc_pvt *pvt = mci->pvt_info;			\
	if (isdigit(*data)) {						\
		if (!kstrtoul(data, 0, &pvt->reg))			\
			return count;					\
	}								\
	return 0;							\
}

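/*
 * Instantiate show/store handlers for each injection parameter; each pair
 * backs the sysfs attribute of the same name.
 */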
TEMPLATE_SHOW(inject);
TEMPLATE_STORE(inject);
TEMPLATE_SHOW(dimm);
TEMPLATE_STORE(dimm);
TEMPLATE_SHOW(bank);
TEMPLATE_STORE(bank);
TEMPLATE_SHOW(rank);
TEMPLATE_STORE(rank);
TEMPLATE_SHOW(row);
TEMPLATE_STORE(row);
TEMPLATE_SHOW(col);
TEMPLATE_STORE(col);

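/*
 * "single" arms a correctable (SEC) error, "double" an uncorrectable
 * (DED) one; the OCTEON II poll routine picks this up via pvt->error_type.
 */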
static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *data,
					  size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct octeon_lmc_pvt *pvt = mci->pvt_info;

	if (!strncmp(data, "single", 6))
		pvt->error_type = 1;
	else if (!strncmp(data, "double", 6))
		pvt->error_type = 2;

	return count;
}

static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
					 struct device_attribute *attr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct octeon_lmc_pvt *pvt = mci->pvt_info;
	if (pvt->error_type == 1)
		return sprintf(data, "single");
	else if (pvt->error_type == 2)
		return sprintf(data, "double");

	return 0;
}

static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_row_show, octeon_mc_inject_row_store);
static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_col_show, octeon_mc_inject_col_store);

static struct attribute *octeon_dev_attrs[] = {
	&dev_attr_inject.attr,
	&dev_attr_error_type.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_row.attr,
	&dev_attr_col.attr,
	NULL
};

ATTRIBUTE_GROUPS(octeon_dev);

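/*
 * Probe one LMC: bail out quietly if ECC is not enabled, otherwise register
 * a memory controller with the EDAC core and mask the LMC's ECC interrupts,
 * since this driver polls for errors.
 */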
static int octeon_lmc_edac_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	int mc = pdev->id;

	opstate_init();

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 1;
	layers[0].is_virt_csrow = false;

	if (OCTEON_IS_OCTEON1PLUS()) {
		union cvmx_lmcx_mem_cfg0 cfg0;

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
		if (!cfg0.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "octeon-lmc-err";
		mci->edac_check = octeon_lmc_edac_poll;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
		cfg0.s.intr_ded_ena = 0;	/* We poll */
		cfg0.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
	} else {
		/* OCTEON II */
		union cvmx_lmcx_int_en en;
		union cvmx_lmcx_config config;

		config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
		if (!config.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "co_lmc_err";
		mci->edac_check = octeon_lmc_edac_poll_o2;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
		en.s.intr_ded_ena = 0;	/* We poll */
		en.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), en.u64);
	}
	platform_set_drvdata(pdev, mci);

	return 0;
}

static int octeon_lmc_edac_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver octeon_lmc_edac_driver = {
	.probe = octeon_lmc_edac_probe,
	.remove = octeon_lmc_edac_remove,
	.driver = {
		.name = "octeon_lmc_edac",
	}
};
module_platform_driver(octeon_lmc_edac_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");