/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				 unsigned len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}
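
/*
 * Usage sketch (hypothetical layout, not taken from a specific driver): for a
 * DIMM located at {channel 1, slot 0} on a controller whose layers are
 * channel/slot, something like
 *
 *	char location[80];
 *
 *	edac_dimm_info_location(dimm, location, sizeof(location));
 *
 * leaves "channel 1 slot 0 " in location, using the names from
 * edac_layer_name[].
 */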

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "  channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "    channel = %p\n", chan);
	edac_dbg(4, "    channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "    channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
	char location[80];

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
		 number, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif /* CONFIG_EDAC_DEBUG */

/*
 * keep those in sync with the enum mem_type
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. At
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_device_alloc_ctl_info() does it
 * above, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	r = (unsigned long)p % align;

	if (r == 0)
		return (char *)ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
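
/*
 * Calling pattern sketch (hypothetical element types and counts, not from a
 * real driver): the caller runs an edac_align_ptr() sequence with a NULL base
 * to compute aligned offsets and the total size, does a single kzalloc() of
 * that size, and then adds the allocation base to each offset - which is
 * exactly what edac_mc_alloc() below does:
 *
 *	void *ptr = NULL;
 *	struct foo *foo;	// 'struct foo' is a made-up example type
 *	u32 *counters;
 *
 *	foo = edac_align_ptr(&ptr, sizeof(*foo), 1);		// offset 0
 *	counters = edac_align_ptr(&ptr, sizeof(u32), 16);	// aligned offset
 *	// (unsigned long)counters + 16 * sizeof(u32) is the size to allocate
 */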

static void _edac_mc_free(struct mem_ctl_info *mci)
{
	int i, chn, row;
	struct csrow_info *csr;
	const unsigned int tot_dimms = mci->tot_dimms;
	const unsigned int tot_channels = mci->num_cschannel;
	const unsigned int tot_csrows = mci->nr_csrows;

	if (mci->dimms) {
		for (i = 0; i < tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}
	if (mci->csrows) {
		for (row = 0; row < tot_csrows; row++) {
			csr = mci->csrows[row];
			if (csr) {
				if (csr->channels) {
					for (chn = 0; chn < tot_channels; chn++)
						kfree(csr->channels[chn]);
					kfree(csr->channels);
				}
				kfree(csr);
			}
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}

/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num:	Memory controller number
 * @n_layers:	Number of MC hierarchy layers
 * @layers:	Describes each layer as seen by the Memory Controller
 * @sz_pvt:	size of private storage needed
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function will allocate multiple struct dimm_info
 * on such scenarios, as grouping the multiple ranks would require changes to
 * the drivers.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->mem_is_per_rank = per_rank;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
					GFP_KERNEL);
		if (!csr->channels)
			goto error;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
	if (!mci->dimms)
		goto error;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		row++;
		if (row == tot_csrows) {
			row = 0;
			chn++;
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
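
/*
 * Layer description sketch (hypothetical 2-csrow x 2-channel controller, not
 * from a specific driver): a driver typically fills a small edac_mc_layer
 * array and passes it here, together with the size of its private data
 * ('struct my_mc_priv' below is a made-up example type):
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 2;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_mc_priv));
 *	if (!mci)
 *		return -ENOMEM;
 */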

/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	/* If we're not yet registered with sysfs free only what was allocated
	 * in edac_mc_alloc().
	 */
	if (!device_is_registered(&mci->dev)) {
		_edac_mc_free(mci);
		return;
	}

	/* the mci instance is freed here, when the sysfs object is dropped */
	edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	edac_dbg(0, "\n");

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		edac_dbg(0, "not canceled, flush the queue\n");

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);
}

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			if (mci->dimms[i]->nr_pages)
				edac_mc_dump_dimm(mci->dimms[i], i);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
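
/*
 * Life-cycle sketch as seen from a driver (hypothetical probe/remove paths,
 * not from a specific driver): allocate with edac_mc_alloc(), fill mci->pdev,
 * mod_name, ctl_name, the dimm/csrow data and - for polled operation - an
 * edac_check callback, then register:
 *
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 *
 * On device removal the reverse order applies:
 *
 *	mci = edac_mc_del_mc(&pdev->dev);
 *	if (mci)
 *		edac_mc_free(mci);
 */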

/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ce_mc += count;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ue_mc += count;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_ce_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s on %s (%s %s - %s)\n",
				       error_count,
				       msg, label, location,
				       detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s on %s (%s %s)\n",
				       error_count,
				       msg, label, location,
				       detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MC's that can't do this, lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
					offset_in_page, grain);
	}
}

static void edac_ue_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s on %s (%s %s - %s)\n",
				       error_count,
				       msg, label, location, detail,
				       other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s on %s (%s %s)\n",
				       error_count,
				       msg, label, location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s on %s (%s%s - %s)\n",
			      msg, label, location, detail, other_detail);
		else
			panic("UE %s on %s (%s%s)\n",
			      msg, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}

#define OTHER_LABEL " or "

/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type:		severity of the error (CE/UE/Fatal)
 * @mci:		a struct mem_ctl_info pointer
 * @error_count:	Number of errors of the same type
 * @page_frame_number:	mem page where the error occurred
 * @offset_in_page:	offset of the error inside the page
 * @syndrome:		ECC syndrome
 * @top_layer:		Memory layer[0] position
 * @mid_layer:		Memory layer[1] position
 * @low_layer:		Memory layer[2] position
 * @msg:		Message meaningful to the end users that
 *			explains the event
 * @other_detail:	Technical details about the event that
 *			may help hardware manufacturers and
 *			EDAC developers to analyse the event
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	/* FIXME: too much for stack: move it to some pre-allocated area */
	char detail[80], location[80];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i;
	long grain;
	bool enable_per_layer_report = false;
	u8 grain_bits;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			if (type == HW_EVENT_ERR_CORRECTED)
				p = "CE";
			else
				p = "UE";

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	grain = 0;
	p = label;
	*p = '\0';
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > grain)
			grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (enable_per_layer_report && dimm->nr_pages) {
			if (p != label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			edac_dbg(4, "%s csrows map: (%d,%d)\n",
				 mci->mem_is_per_rank ? "rank" : "dimm",
				 dimm->csrow, dimm->cschannel);
			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!enable_per_layer_report) {
		strcpy(label, "any memory");
	} else {
		edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
		if (p == label)
			strcpy(label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count += error_count;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count += error_count;
			}
		} else
			if (row >= 0)
				mci->csrows[row]->ue_count += error_count;
	}

	/* Fill the RAM location data */
	p = location;
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */

	grain_bits = fls_long(grain) + 1;
	trace_mc_event(type, msg, label, error_count,
		       mci->mc_idx, top_layer, mid_layer, low_layer,
		       PAGES_TO_MiB(page_frame_number) | offset_in_page,
		       grain_bits, syndrome, other_detail);

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			page_frame_number, offset_in_page,
			grain, syndrome);
		edac_ce_error(mci, error_count, pos, msg, location, label,
			      detail, other_detail, enable_per_layer_report,
			      page_frame_number, offset_in_page, grain);
	} else {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld",
			page_frame_number, offset_in_page, grain);

		edac_ue_error(mci, error_count, pos, msg, location, label,
			      detail, other_detail, enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
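
/*
 * Reporting sketch (hypothetical decoded error, all values made up): a driver
 * that resolved one corrected error to csrow 1, channel 0 on a csrow/channel
 * layered controller could report it as:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     page_frame_number, offset_in_page, syndrome,
 *			     1, 0, -1,
 *			     "read error", "");
 *
 * Layers the driver cannot resolve are passed as -1, which is what makes the
 * code above fall back to the "any memory"/"unknown memory" labels.
 */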