/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
	debugf4("\tchannel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	int i;

	debugf4("\tdimm = %p\n", dimm);
	debugf4("\tdimm->label = '%s'\n", dimm->label);
	debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
	debugf4("\tdimm location ");
	for (i = 0; i < dimm->mci->n_layers; i++) {
		printk(KERN_CONT "%d", dimm->location[i]);
		if (i < dimm->mci->n_layers - 1)
			printk(KERN_CONT ".");
	}
	printk(KERN_CONT "\n");
	debugf4("\tdimm->grain = %d\n", dimm->grain);
	debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
		mci->tot_dimms, mci->dimms);
	debugf3("\tdev = %p\n", mci->pdev);
	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

/*
 * keep those in sync with the enum mem_type
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. On
 *	return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed so that successive calls keep advancing to the
 * proper offsets in memory when a struct is allocated together with its
 * embedded structs, as edac_device_alloc_ctl_info() does, for example.
 *
 * On return, '*p' will have been incremented, ready to be used on the next
 * call to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	r = size % align;

	if (r == 0)
		return (char *)ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}

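/*
 * Illustrative use of edac_align_ptr() (a sketch only, not part of the
 * driver API; the struct names "foo"/"bar" and the sizes are hypothetical).
 * It shows the two-pass pattern that edac_mc_alloc() below follows: compute
 * offsets against a NULL base, allocate once, then rebase each pointer.
 *
 *	void *ptr = NULL, *pvt;
 *	struct foo *foo;
 *	struct bar *bars;
 *	unsigned size;
 *
 *	// Pass 1: compute aligned offsets relative to a NULL base
 *	foo  = edac_align_ptr(&ptr, sizeof(*foo), 1);
 *	bars = edac_align_ptr(&ptr, sizeof(*bars), nr_bars);
 *	pvt  = edac_align_ptr(&ptr, pvt_size, 1);
 *	size = ((unsigned long)pvt) + pvt_size;
 *
 *	foo = kzalloc(size, GFP_KERNEL);
 *
 *	// Pass 2: turn each offset into a real pointer inside the chunk
 *	bars = (struct bar *)(((char *)foo) + ((unsigned long)bars));
 *	pvt  = pvt_size ? (((char *)foo) + ((unsigned long)pvt)) : NULL;
 */
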
/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num:	Memory controller number
 * @n_layers:	Number of MC hierarchy layers
 * @layers:	Describes each layer as seen by the Memory Controller
 * @sz_pvt:	size of private storage needed
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function allocates multiple struct dimm_info
 * in such scenarios, as grouping the multiple ranks would require changes
 * in the drivers.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csi, *csr;
	struct rank_info *chi, *chp, *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, err, row, chn, n, len;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
	chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
	dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		__func__, size,
		tot_dimms,
		per_rank ? "ranks" : "dimms",
		tot_csrows * tot_channels);
	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
	chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
	dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->csrows = csi;
	mci->dimms = dimm;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->mem_is_per_rank = per_rank;

	/*
	 * Fill the csrow struct
	 */
	for (row = 0; row < tot_csrows; row++) {
		csr = &csi[row];
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		chp = &chi[row * tot_channels];
		csr->channels = chp;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Fill the dimm struct
	 */
	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
		per_rank ? "ranks" : "dimms");
	for (i = 0; i < tot_dimms; i++) {
		chan = &csi[row].channels[chn];
		dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
				     pos[0], pos[1], pos[2]);
		dimm->mci = mci;

		debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
			i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
			pos[0], pos[1], pos[2], row, chn);

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		row++;
		if (row == tot_csrows) {
			row = 0;
			chn++;
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;
	INIT_LIST_HEAD(&mci->grp_kobj_list);

	/*
	 * Initialize the 'root' kobj for the edac_mc controller
	 */
	err = edac_mc_register_sysfs_main_kobj(mci);
	if (err) {
		kfree(mci);
		return NULL;
	}

	/* At this point the root kobj is valid; in order to 'free' the
	 * object, edac_mc_unregister_sysfs_main_kobj() must be called,
	 * which performs the kobj unregistration; the actual free then
	 * happens in the kobject release callback.
	 */

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

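/*
 * Illustrative use of edac_mc_alloc() (a sketch, not taken from a real
 * driver): describe a controller with one chip-select layer and one channel
 * layer, and reserve room for a hypothetical driver-private struct my_pvt.
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = nr_csrows;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = nr_channels;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */
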
/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	debugf1("%s()\n", __func__);

	edac_mc_unregister_sysfs_main_kobj(mci);

	/* free the mci instance memory here */
	kfree(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);


/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	debugf3("%s()\n", __func__);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to the offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			   msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		debugf0("%s() not canceled, flush the queue\n",
			__func__);

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_mc_reset_delay_period(int value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	/* scan the list and turn off all workq timers, doing so under lock
	 */
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			cancel_delayed_work(&mci->work);
	}

	mutex_unlock(&mem_ctls_mutex);


	/* re-walk the list, and reset the poll delay */
	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}


/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);
}

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(&mci->csrows[i].
						     channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			edac_mc_dump_dimm(&mci->dimms[i]);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);

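/*
 * Typical driver flow around edac_mc_add_mc() (a simplified sketch; names
 * such as my_probe/my_remove/my_check are hypothetical and error handling
 * is trimmed):
 *
 *	static void my_check(struct mem_ctl_info *mci)
 *	{
 *		// read the error registers, then call edac_mc_handle_error()
 *	}
 *
 *	static int my_probe(struct pci_dev *pdev, ...)
 *	{
 *		mci = edac_mc_alloc(0, n_layers, layers, sizeof(*pvt));
 *		mci->pdev = &pdev->dev;
 *		mci->edac_check = my_check;	// leave NULL if interrupt driven
 *		// fill mtype_cap, edac_ctl_cap, ctl_name, dimm info, ...
 *		if (edac_mc_add_mc(mci))	// starts polling when edac_check is set
 *			edac_mc_free(mci);
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		mci = edac_mc_del_mc(&pdev->dev);
 *		if (mci)
 *			edac_mc_free(mci);
 *	}
 */
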
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("%s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info *csrows = mci->csrows;
	int row, i, j, n;

	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = &csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j].dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
			"mask(0x%lx)\n", mci->mc_idx, __func__,
			csrow->first_page, page, csrow->last_page,
			csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ce_mc++;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ue_mc++;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_ce_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s - %s)\n",
				       msg, label, location,
				       detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s)\n",
				       msg, label, location,
				       detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos);

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MC's that can't do this, lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}

static void edac_ue_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s - %s)\n",
				       msg, label, location, detail,
				       other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s)\n",
				       msg, label, location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s on %s (%s%s - %s)\n",
			      msg, label, location, detail, other_detail);
		else
			panic("UE %s on %s (%s%s)\n",
			      msg, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos);
}

#define OTHER_LABEL " or "

/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type:			severity of the error (CE/UE/Fatal)
 * @mci:			a struct mem_ctl_info pointer
 * @page_frame_number:		mem page where the error occurred
 * @offset_in_page:		offset of the error inside the page
 * @syndrome:			ECC syndrome
 * @top_layer:			Memory layer[0] position
 * @mid_layer:			Memory layer[1] position
 * @low_layer:			Memory layer[2] position
 * @msg:			Message meaningful to the end users that
 *				explains the event
 * @other_detail:		Technical details about the event that
 *				may help hardware manufacturers and
 *				EDAC developers to analyse the event
 * @arch_log:			Architecture-specific struct that can
 *				be used to add extended information to the
 *				tracepoint, like dumping MCE registers.
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail,
			  const void *arch_log)
{
	/* FIXME: too much for stack: move it to some pre-allocated area */
	char detail[80], location[80];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i;
	long grain;
	bool enable_per_layer_report = false;
	u16 error_count;	/* FIXME: make it a parameter */
	u8 grain_bits;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			if (type == HW_EVENT_ERR_CORRECTED)
				p = "CE";
			else
				p = "UE";

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	grain = 0;
	p = label;
	*p = '\0';
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = &mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > grain)
			grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (enable_per_layer_report && dimm->nr_pages) {
			if (p != label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			debugf4("%s: %s csrows map: (%d,%d)\n",
				__func__,
				mci->mem_is_per_rank ? "rank" : "dimm",
				dimm->csrow, dimm->cschannel);

			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!enable_per_layer_report) {
		strcpy(label, "any memory");
	} else {
		debugf4("%s: csrow/channel to increment: (%d,%d)\n",
			__func__, row, chan);
		if (p == label)
			strcpy(label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row].ce_count++;
				if (chan >= 0)
					mci->csrows[row].channels[chan].ce_count++;
			}
		} else
			if (row >= 0)
				mci->csrows[row].ue_count++;
	}

	/* Fill the RAM location data */
	p = location;
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */

	error_count = 1;	/* FIXME: allow changing it */
	grain_bits = fls_long(grain) + 1;
	trace_mc_event(type, msg, label, error_count,
		       mci->mc_idx, top_layer, mid_layer, low_layer,
		       PAGES_TO_MiB(page_frame_number) | offset_in_page,
		       grain_bits, syndrome, other_detail);

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			page_frame_number, offset_in_page,
			grain, syndrome);
		edac_ce_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report,
			      page_frame_number, offset_in_page, grain);
	} else {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld",
			page_frame_number, offset_in_page, grain);

		edac_ue_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
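
/*
 * Illustrative call of edac_mc_handle_error() (a sketch, not from a real
 * driver): report a corrected error decoded to csrow 2, channel 1 of this
 * controller, where the page, offset and syndrome come from hypothetical
 * driver variables.
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *			     page_frame_number, offset_in_page, syndrome,
 *			     2, 1, -1,		// top/mid/low layer positions
 *			     "ECC read error",	// message for the user
 *			     "",		// no extra technical detail
 *			     NULL);		// no architecture-specific log
 *
 * Passing -1 for a layer position means the driver could not narrow the
 * error down at that layer; the label logic above then lists every DIMM
 * that could potentially be affected.
 */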