/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                 unsigned len)
{
        struct mem_ctl_info *mci = dimm->mci;
        int i, n, count = 0;
        char *p = buf;

        for (i = 0; i < mci->n_layers; i++) {
                n = snprintf(p, len, "%s %d ",
                             edac_layer_name[mci->layers[i].type],
                             dimm->location[i]);
                p += n;
                len -= n;
                count += n;
                if (!len)
                        break;
        }

        return count;
}
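
/*
 * Illustrative example (not part of the original file): for a controller
 * registered with an EDAC_MC_LAYER_CHIP_SELECT layer followed by an
 * EDAC_MC_LAYER_CHANNEL layer, a DIMM at position (1, 0) would be described
 * by edac_dimm_info_location() with a string such as "csrow 1 channel 0 "
 * (see edac_layer_name[] further down for the per-layer names).
 */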

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
        edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
        edac_dbg(4, " channel = %p\n", chan);
        edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
        edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
        char location[80];

        edac_dimm_info_location(dimm, location, sizeof(location));

        edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
                 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
                 number, location, dimm->csrow, dimm->cschannel);
        edac_dbg(4, " dimm = %p\n", dimm);
        edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
        edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
        edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
        edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
        edac_dbg(4, " csrow = %p\n", csrow);
        edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
        edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
        edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
        edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
        edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
        edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
        edac_dbg(3, "\tmci = %p\n", mci);
        edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
        edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
        edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
        edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
        edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
                 mci->nr_csrows, mci->csrows);
        edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
                 mci->tot_dimms, mci->dimms);
        edac_dbg(3, "\tdev = %p\n", mci->pdev);
        edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
                 mci->mod_name, mci->ctl_name);
        edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif /* CONFIG_EDAC_DEBUG */

/*
 * keep those in sync with the enum mem_type
 */
const char *edac_mem_types[] = {
        "Empty csrow",
        "Reserved csrow type",
        "Unknown csrow type",
        "Fast page mode RAM",
        "Extended data out RAM",
        "Burst Extended data out RAM",
        "Single data rate SDRAM",
        "Registered single data rate SDRAM",
        "Double data rate SDRAM",
        "Registered Double data rate SDRAM",
        "Rambus DRAM",
        "Unbuffered DDR2 RAM",
        "Fully buffered DDR2",
        "Registered DDR2 RAM",
        "Rambus XDR",
        "Unbuffered DDR3 RAM",
        "Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. At
 *	return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed so that the offset keeps advancing properly
 * through memory when a struct is allocated together with its embedded
 * structs, as edac_device_alloc_ctl_info() does, for example.
 *
 * On return, the pointer 'p' will have been incremented, ready to be used
 * on the next call to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
        unsigned align, r;
        void *ptr = *p;

        *p += size * n_elems;

        /*
         * 'p' can possibly be an unaligned item X such that sizeof(X) is
         * 'size'. Adjust 'p' so that its alignment is at least as
         * stringent as what the compiler would provide for X and return
         * the aligned result.
         * Here we assume that the alignment of a "long long" is the most
         * stringent alignment that the compiler will ever provide by default.
         * As far as I know, this is a reasonable assumption.
         */
        if (size > sizeof(long))
                align = sizeof(long long);
        else if (size > sizeof(int))
                align = sizeof(long);
        else if (size > sizeof(short))
                align = sizeof(int);
        else if (size > sizeof(char))
                align = sizeof(short);
        else
                return (char *)ptr;

        r = (unsigned long)p % align;

        if (r == 0)
                return (char *)ptr;

        *p += align - r;

        return (void *)(((unsigned long)ptr) + align - r);
}
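
/*
 * Illustrative sketch (not part of the original file) of the two-pass
 * pattern edac_align_ptr() supports, mirroring what edac_mc_alloc() does
 * below. The struct and variable names ("foo", "counters", "n_counters")
 * are hypothetical:
 *
 *	void *ptr = NULL, *pvt;
 *	struct foo *foo;
 *	u32 *counters;
 *	unsigned size;
 *
 *	foo      = edac_align_ptr(&ptr, sizeof(*foo), 1);
 *	counters = edac_align_ptr(&ptr, sizeof(u32), n_counters);
 *	pvt      = edac_align_ptr(&ptr, sz_pvt, 1);
 *	size     = ((unsigned long)pvt) + sz_pvt;
 *
 *	foo = kzalloc(size, GFP_KERNEL);		// single-shot allocation
 *	counters = (u32 *)((char *)foo + (unsigned long)counters);
 *	pvt      = sz_pvt ? ((char *)foo + (unsigned long)pvt) : NULL;
 */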

static void _edac_mc_free(struct mem_ctl_info *mci)
{
        int i, chn, row;
        struct csrow_info *csr;
        const unsigned int tot_dimms = mci->tot_dimms;
        const unsigned int tot_channels = mci->num_cschannel;
        const unsigned int tot_csrows = mci->nr_csrows;

        if (mci->dimms) {
                for (i = 0; i < tot_dimms; i++)
                        kfree(mci->dimms[i]);
                kfree(mci->dimms);
        }
        if (mci->csrows) {
                for (row = 0; row < tot_csrows; row++) {
                        csr = mci->csrows[row];
                        if (csr) {
                                if (csr->channels) {
                                        for (chn = 0; chn < tot_channels; chn++)
                                                kfree(csr->channels[chn]);
                                        kfree(csr->channels);
                                }
                                kfree(csr);
                        }
                }
                kfree(mci->csrows);
        }
        kfree(mci);
}

/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num: Memory controller number
 * @n_layers: Number of MC hierarchy layers
 * @layers: Describes each layer as seen by the Memory Controller
 * @sz_pvt: size of private storage needed
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function will allocate multiple struct dimm_info
 * in such scenarios, as grouping the multiple ranks would require changes
 * in the drivers.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
                                   unsigned n_layers,
                                   struct edac_mc_layer *layers,
                                   unsigned sz_pvt)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer *layer;
        struct csrow_info *csr;
        struct rank_info *chan;
        struct dimm_info *dimm;
        u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
        unsigned pos[EDAC_MAX_LAYERS];
        unsigned size, tot_dimms = 1, count = 1;
        unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
        void *pvt, *p, *ptr = NULL;
        int i, j, row, chn, n, len, off;
        bool per_rank = false;

        BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
        /*
         * Calculate the total amount of dimms and csrows/cschannels while
         * in the old API emulation mode
         */
        for (i = 0; i < n_layers; i++) {
                tot_dimms *= layers[i].size;
                if (layers[i].is_virt_csrow)
                        tot_csrows *= layers[i].size;
                else
                        tot_channels *= layers[i].size;

                if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
                        per_rank = true;
        }

        /* Figure out the offsets of the various items from the start of an mc
         * structure.  We want the alignment of each item to be at least as
         * stringent as what the compiler would provide if we could simply
         * hardcode everything into a single struct.
         */
        mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
        layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
        for (i = 0; i < n_layers; i++) {
                count *= layers[i].size;
                edac_dbg(4, "errcount layer %d size %d\n", i, count);
                ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                tot_errcount += 2 * count;
        }

        edac_dbg(4, "allocating %d error counters\n", tot_errcount);
        pvt = edac_align_ptr(&ptr, sz_pvt, 1);
        size = ((unsigned long)pvt) + sz_pvt;

        edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
                 size,
                 tot_dimms,
                 per_rank ? "ranks" : "dimms",
                 tot_csrows * tot_channels);

        mci = kzalloc(size, GFP_KERNEL);
        if (mci == NULL)
                return NULL;

        /* Adjust pointers so they point within the memory we just allocated
         * rather than an imaginary chunk of memory located at address 0.
         */
        layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
        for (i = 0; i < n_layers; i++) {
                mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
                mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
        }
        pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

        /* setup index and various internal pointers */
        mci->mc_idx = mc_num;
        mci->tot_dimms = tot_dimms;
        mci->pvt_info = pvt;
        mci->n_layers = n_layers;
        mci->layers = layer;
        memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
        mci->nr_csrows = tot_csrows;
        mci->num_cschannel = tot_channels;
        mci->mem_is_per_rank = per_rank;

        /*
         * Allocate and fill the csrow/channels structs
         */
        mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
        if (!mci->csrows)
                goto error;
        for (row = 0; row < tot_csrows; row++) {
                csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
                if (!csr)
                        goto error;
                mci->csrows[row] = csr;
                csr->csrow_idx = row;
                csr->mci = mci;
                csr->nr_channels = tot_channels;
                csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
                                        GFP_KERNEL);
                if (!csr->channels)
                        goto error;

                for (chn = 0; chn < tot_channels; chn++) {
                        chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
                        if (!chan)
                                goto error;
                        csr->channels[chn] = chan;
                        chan->chan_idx = chn;
                        chan->csrow = csr;
                }
        }

        /*
         * Allocate and fill the dimm structs
         */
        mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
        if (!mci->dimms)
                goto error;

        memset(&pos, 0, sizeof(pos));
        row = 0;
        chn = 0;
        for (i = 0; i < tot_dimms; i++) {
                chan = mci->csrows[row]->channels[chn];
                off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
                if (off < 0 || off >= tot_dimms) {
                        edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
                        goto error;
                }

                dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
                if (!dimm)
                        goto error;
                mci->dimms[off] = dimm;
                dimm->mci = mci;

                /*
                 * Copy DIMM location and initialize it.
                 */
                len = sizeof(dimm->label);
                p = dimm->label;
                n = snprintf(p, len, "mc#%u", mc_num);
                p += n;
                len -= n;
                for (j = 0; j < n_layers; j++) {
                        n = snprintf(p, len, "%s#%u",
                                     edac_layer_name[layers[j].type],
                                     pos[j]);
                        p += n;
                        len -= n;
                        dimm->location[j] = pos[j];

                        if (len <= 0)
                                break;
                }

                /* Link it to the csrows old API data */
                chan->dimm = dimm;
                dimm->csrow = row;
                dimm->cschannel = chn;

                /* Increment csrow location */
                row++;
                if (row == tot_csrows) {
                        row = 0;
                        chn++;
                }

                /* Increment dimm location */
                for (j = n_layers - 1; j >= 0; j--) {
                        pos[j]++;
                        if (pos[j] < layers[j].size)
                                break;
                        pos[j] = 0;
                }
        }

        mci->op_state = OP_ALLOC;

        /* At this point the root kobj is valid; in order to 'free' the
         * object, edac_mc_unregister_sysfs_main_kobj() must be called,
         * which performs the kobj unregistration. The actual free then
         * happens during the kobject release callback.
         */

        return mci;

error:
        _edac_mc_free(mci);

        return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
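
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * describe a controller with 4 chip-select rows and 2 channels per row and
 * allocate its mem_ctl_info. "struct my_pvt" is a hypothetical private
 * data structure:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 4;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */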

/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
        edac_dbg(1, "\n");

        /* If we're not yet registered with sysfs free only what was allocated
         * in edac_mc_alloc().
         */
        if (!device_is_registered(&mci->dev)) {
                _edac_mc_free(mci);
                return;
        }

        /* the mci instance is freed here, when the sysfs object is dropped */
        edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);


/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        edac_dbg(3, "\n");

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->pdev == dev)
                        return mci;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
        int old_state;

        if (edac_op_state == EDAC_OPSTATE_POLL)
                return 1;

        old_state = edac_err_assert;
        edac_err_assert = 0;

        return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
        struct delayed_work *d_work = to_delayed_work(work_req);
        struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

        mutex_lock(&mem_ctls_mutex);

        /* if this control struct has moved to offline state, we are done */
        if (mci->op_state == OP_OFFLINE) {
                mutex_unlock(&mem_ctls_mutex);
                return;
        }

        /* Only poll controllers that are running polled and have a check */
        if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
                mci->edac_check(mci);

        mutex_unlock(&mem_ctls_mutex);

        /* Reschedule */
        queue_delayed_work(edac_workqueue, &mci->work,
                           msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
        edac_dbg(0, "\n");

        /* if this instance is not in the POLL state, then simply return */
        if (mci->op_state != OP_RUNNING_POLL)
                return;

        INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
        queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
        int status;

        if (mci->op_state != OP_RUNNING_POLL)
                return;

        status = cancel_delayed_work(&mci->work);
        if (status == 0) {
                edac_dbg(0, "not canceled, flush the queue\n");

                /* workq instance might be running, wait for it */
                flush_workqueue(edac_workqueue);
        }
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        mutex_lock(&mem_ctls_mutex);

        /* scan the list and turn off all workq timers, doing so under lock
         */
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->op_state == OP_RUNNING_POLL)
                        cancel_delayed_work(&mci->work);
        }

        mutex_unlock(&mem_ctls_mutex);


        /* re-walk the list, and reset the poll delay */
        mutex_lock(&mem_ctls_mutex);

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                edac_mc_workq_setup(mci, (unsigned long) value);
        }

        mutex_unlock(&mem_ctls_mutex);
}



/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
        struct list_head *item, *insert_before;
        struct mem_ctl_info *p;

        insert_before = &mc_devices;

        p = find_mci_by_dev(mci->pdev);
        if (unlikely(p != NULL))
                goto fail0;

        list_for_each(item, &mc_devices) {
                p = list_entry(item, struct mem_ctl_info, link);

                if (p->mc_idx >= mci->mc_idx) {
                        if (unlikely(p->mc_idx == mci->mc_idx))
                                goto fail1;

                        insert_before = item;
                        break;
                }
        }

        list_add_tail_rcu(&mci->link, insert_before);
        atomic_inc(&edac_handlers);
        return 0;

fail0:
        edac_printk(KERN_WARNING, EDAC_MC,
                "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
                edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
        return 1;

fail1:
        edac_printk(KERN_WARNING, EDAC_MC,
                "bug in low-level driver: attempt to assign\n"
                " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
        return 1;
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
        atomic_dec(&edac_handlers);
        list_del_rcu(&mci->link);

        /* these are for safe removal of devices from global list while
         * NMI handlers may be traversing list
         */
        synchronize_rcu();
        INIT_LIST_HEAD(&mci->link);
}

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
        struct list_head *item;
        struct mem_ctl_info *mci;

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->mc_idx >= idx) {
                        if (mci->mc_idx == idx)
                                return mci;

                        break;
                }
        }

        return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
        edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
        if (edac_debug_level >= 3)
                edac_mc_dump_mci(mci);

        if (edac_debug_level >= 4) {
                int i;

                for (i = 0; i < mci->nr_csrows; i++) {
                        struct csrow_info *csrow = mci->csrows[i];
                        u32 nr_pages = 0;
                        int j;

                        for (j = 0; j < csrow->nr_channels; j++)
                                nr_pages += csrow->channels[j]->dimm->nr_pages;
                        if (!nr_pages)
                                continue;
                        edac_mc_dump_csrow(csrow);
                        for (j = 0; j < csrow->nr_channels; j++)
                                if (csrow->channels[j]->dimm->nr_pages)
                                        edac_mc_dump_channel(csrow->channels[j]);
                }
                for (i = 0; i < mci->tot_dimms; i++)
                        if (mci->dimms[i]->nr_pages)
                                edac_mc_dump_dimm(mci->dimms[i], i);
        }
#endif
        mutex_lock(&mem_ctls_mutex);

        if (add_mc_to_global_list(mci))
                goto fail0;

        /* set load time so that error rate can be tracked */
        mci->start_time = jiffies;

        if (edac_create_sysfs_mci_device(mci)) {
                edac_mc_printk(mci, KERN_WARNING,
                        "failed to create sysfs device\n");
                goto fail1;
        }

        /* If there IS a check routine, then we are running POLLED */
        if (mci->edac_check != NULL) {
                /* This instance is NOW RUNNING */
                mci->op_state = OP_RUNNING_POLL;

                edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
        } else {
                mci->op_state = OP_RUNNING_INTERRUPT;
        }

        /* Report action taken */
        edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
                " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

        mutex_unlock(&mem_ctls_mutex);
        return 0;

fail1:
        del_mc_from_global_list(mci);

fail0:
        mutex_unlock(&mem_ctls_mutex);
        return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
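
/*
 * Illustrative sketch (not part of the original file): typical probe-time
 * sequence once the mci returned by edac_mc_alloc() has been filled in
 * (mtype_cap, edac_ctl_cap, mod_name, ctl_name, csrow/dimm data, an
 * optional ->edac_check poll routine, ...). "pdev" is a hypothetical
 * platform/PCI device owned by the driver:
 *
 *	mci->pdev = &pdev->dev;
 *
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 */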

/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
        struct mem_ctl_info *mci;

        edac_dbg(0, "\n");

        mutex_lock(&mem_ctls_mutex);

        /* find the requested mci struct in the global list */
        mci = find_mci_by_dev(dev);
        if (mci == NULL) {
                mutex_unlock(&mem_ctls_mutex);
                return NULL;
        }

        del_mc_from_global_list(mci);
        mutex_unlock(&mem_ctls_mutex);

        /* flush workq processes */
        edac_mc_workq_teardown(mci);

        /* marking MCI offline */
        mci->op_state = OP_OFFLINE;

        /* remove from sysfs */
        edac_remove_sysfs_mci_device(mci);

        edac_printk(KERN_INFO, EDAC_MC,
                "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
                mci->mod_name, mci->ctl_name, edac_dev_name(mci));

        return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
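
/*
 * Illustrative sketch (not part of the original file): the matching
 * remove-time sequence, keyed on the same device that was stored in
 * mci->pdev at probe time:
 *
 *	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
 *
 *	if (mci)
 *		edac_mc_free(mci);
 */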

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
                                u32 size)
{
        struct page *pg;
        void *virt_addr;
        unsigned long flags = 0;

        edac_dbg(3, "\n");

        /* ECC error page was not in our memory. Ignore it. */
        if (!pfn_valid(page))
                return;

        /* Find the actual page structure then map it and fix */
        pg = pfn_to_page(page);

        if (PageHighMem(pg))
                local_irq_save(flags);

        virt_addr = kmap_atomic(pg);

        /* Perform architecture specific atomic scrub operation */
        atomic_scrub(virt_addr + offset, size);

        /* Unmap and complete */
        kunmap_atomic(virt_addr);

        if (PageHighMem(pg))
                local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
        struct csrow_info **csrows = mci->csrows;
        int row, i, j, n;

        edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
        row = -1;

        for (i = 0; i < mci->nr_csrows; i++) {
                struct csrow_info *csrow = csrows[i];
                n = 0;
                for (j = 0; j < csrow->nr_channels; j++) {
                        struct dimm_info *dimm = csrow->channels[j]->dimm;
                        n += dimm->nr_pages;
                }
                if (n == 0)
                        continue;

                edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
                         mci->mc_idx,
                         csrow->first_page, page, csrow->last_page,
                         csrow->page_mask);

                if ((page >= csrow->first_page) &&
                    (page <= csrow->last_page) &&
                    ((page & csrow->page_mask) ==
                     (csrow->first_page & csrow->page_mask))) {
                        row = i;
                        break;
                }
        }

        if (row == -1)
                edac_mc_printk(mci, KERN_ERR,
                        "could not look up page error address %lx\n",
                        (unsigned long)page);

        return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
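
/*
 * Illustrative example (not part of the original file): a driver that only
 * knows the physical address of a fault can map it back to a chip-select
 * row before reporting it:
 *
 *	int row = edac_mc_find_csrow_by_page(mci, fault_addr >> PAGE_SHIFT);
 */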

const char *edac_layer_name[] = {
        [EDAC_MC_LAYER_BRANCH] = "branch",
        [EDAC_MC_LAYER_CHANNEL] = "channel",
        [EDAC_MC_LAYER_SLOT] = "slot",
        [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ce_mc += count;

        if (!enable_per_layer_report) {
                mci->ce_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ce_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ue_mc += count;

        if (!enable_per_layer_report) {
                mci->ue_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ue_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

static void edac_ce_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          long grain)
{
        unsigned long remapped_page;

        if (edac_mc_get_log_ce()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s on %s (%s %s - %s)\n",
                                       error_count,
                                       msg, label, location,
                                       detail, other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s on %s (%s %s)\n",
                                       error_count,
                                       msg, label, location,
                                       detail);
        }
        edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

        if (mci->scrub_mode & SCRUB_SW_SRC) {
                /*
                 * Some memory controllers (called MCs below) can remap
                 * memory so that it is still available at a different
                 * address when PCI devices map into memory.
                 * MCs that can't do this lose the memory where PCI
                 * devices are mapped. This mapping is MC-dependent
                 * and so we call back into the MC driver for it to
                 * map the MC page to a physical (CPU) page which can
                 * then be mapped to a virtual page - which can then
                 * be scrubbed.
                 */
                remapped_page = mci->ctl_page_to_phys ?
                        mci->ctl_page_to_phys(mci, page_frame_number) :
                        page_frame_number;

                edac_mc_scrub_block(remapped_page,
                                    offset_in_page, grain);
        }
}

static void edac_ue_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report)
{
        if (edac_mc_get_log_ue()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s on %s (%s %s - %s)\n",
                                       error_count,
                                       msg, label, location, detail,
                                       other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s on %s (%s %s)\n",
                                       error_count,
                                       msg, label, location, detail);
        }

        if (edac_mc_get_panic_on_ue()) {
                if (other_detail && *other_detail)
                        panic("UE %s on %s (%s%s - %s)\n",
                              msg, label, location, detail, other_detail);
                else
                        panic("UE %s on %s (%s%s)\n",
                              msg, label, location, detail);
        }

        edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}

#define OTHER_LABEL " or "

/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type: severity of the error (CE/UE/Fatal)
 * @mci: a struct mem_ctl_info pointer
 * @error_count: Number of errors of the same type
 * @page_frame_number: mem page where the error occurred
 * @offset_in_page: offset of the error inside the page
 * @syndrome: ECC syndrome
 * @top_layer: Memory layer[0] position
 * @mid_layer: Memory layer[1] position
 * @low_layer: Memory layer[2] position
 * @msg: Message meaningful to the end users that
 *	explains the event
 * @other_detail: Technical details about the event that
 *	may help hardware manufacturers and
 *	EDAC developers to analyse the event
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                          struct mem_ctl_info *mci,
                          const u16 error_count,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          const unsigned long syndrome,
                          const int top_layer,
                          const int mid_layer,
                          const int low_layer,
                          const char *msg,
                          const char *other_detail)
{
        /* FIXME: too much for stack: move it to some pre-allocated area */
        char detail[80], location[80];
        char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
        char *p;
        int row = -1, chan = -1;
        int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
        int i;
        long grain;
        bool enable_per_layer_report = false;
        u8 grain_bits;

        edac_dbg(3, "MC%d\n", mci->mc_idx);

        /*
         * Check if the event report is consistent and if the memory
         * location is known. If it is known, enable_per_layer_report will be
         * true, the DIMM(s) label info will be filled and the per-layer
         * error counters will be incremented.
         */
        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] >= (int)mci->layers[i].size) {
                        if (type == HW_EVENT_ERR_CORRECTED)
                                p = "CE";
                        else
                                p = "UE";

                        edac_mc_printk(mci, KERN_ERR,
                                       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
                                       edac_layer_name[mci->layers[i].type],
                                       pos[i], mci->layers[i].size);
                        /*
                         * Instead of just returning it, let's use what's
                         * known about the error. The increment routines and
                         * the DIMM filter logic will do the right thing by
                         * pointing the likely damaged DIMMs.
                         */
                        pos[i] = -1;
                }
                if (pos[i] >= 0)
                        enable_per_layer_report = true;
        }

        /*
         * Get the dimm label/grain that applies to the match criteria.
         * As the error algorithm may not be able to point to just one memory
         * stick, the logic here will get all possible labels that could
         * potentially be affected by the error.
         * On FB-DIMM memory controllers, for uncorrected errors, it is common
         * to have only the MC channel and the MC dimm (also called "branch")
         * but the channel is not known, as the memory is arranged in pairs,
         * where each memory belongs to a separate channel within the same
         * branch.
         */
        grain = 0;
        p = label;
        *p = '\0';
        for (i = 0; i < mci->tot_dimms; i++) {
                struct dimm_info *dimm = mci->dimms[i];

                if (top_layer >= 0 && top_layer != dimm->location[0])
                        continue;
                if (mid_layer >= 0 && mid_layer != dimm->location[1])
                        continue;
                if (low_layer >= 0 && low_layer != dimm->location[2])
                        continue;

                /* get the max grain, over the error match range */
                if (dimm->grain > grain)
                        grain = dimm->grain;

                /*
                 * If the error is memory-controller wide, there's no need to
                 * seek for the affected DIMMs because the whole
                 * channel/memory controller/... may be affected.
                 * Also, don't show errors for empty DIMM slots.
                 */
                if (enable_per_layer_report && dimm->nr_pages) {
                        if (p != label) {
                                strcpy(p, OTHER_LABEL);
                                p += strlen(OTHER_LABEL);
                        }
                        strcpy(p, dimm->label);
                        p += strlen(p);
                        *p = '\0';

                        /*
                         * get csrow/channel of the DIMM, in order to allow
                         * incrementing the compat API counters
                         */
                        edac_dbg(4, "%s csrows map: (%d,%d)\n",
                                 mci->mem_is_per_rank ? "rank" : "dimm",
                                 dimm->csrow, dimm->cschannel);
                        if (row == -1)
                                row = dimm->csrow;
                        else if (row >= 0 && row != dimm->csrow)
                                row = -2;

                        if (chan == -1)
                                chan = dimm->cschannel;
                        else if (chan >= 0 && chan != dimm->cschannel)
                                chan = -2;
                }
        }

        if (!enable_per_layer_report) {
                strcpy(label, "any memory");
        } else {
                edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
                if (p == label)
                        strcpy(label, "unknown memory");
                if (type == HW_EVENT_ERR_CORRECTED) {
                        if (row >= 0) {
                                mci->csrows[row]->ce_count += error_count;
                                if (chan >= 0)
                                        mci->csrows[row]->channels[chan]->ce_count += error_count;
                        }
                } else
                        if (row >= 0)
                                mci->csrows[row]->ue_count += error_count;
        }

        /* Fill the RAM location data */
        p = location;
        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        continue;

                p += sprintf(p, "%s:%d ",
                             edac_layer_name[mci->layers[i].type],
                             pos[i]);
        }
        if (p > location)
                *(p - 1) = '\0';

        /* Report the error via the trace interface */

        grain_bits = fls_long(grain) + 1;
        trace_mc_event(type, msg, label, error_count,
                       mci->mc_idx, top_layer, mid_layer, low_layer,
                       PAGES_TO_MiB(page_frame_number) | offset_in_page,
                       grain_bits, syndrome, other_detail);

        /* Memory type dependent details about the error */
        if (type == HW_EVENT_ERR_CORRECTED) {
                snprintf(detail, sizeof(detail),
                        "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
                        page_frame_number, offset_in_page,
                        grain, syndrome);
                edac_ce_error(mci, error_count, pos, msg, location, label,
                              detail, other_detail, enable_per_layer_report,
                              page_frame_number, offset_in_page, grain);
        } else {
                snprintf(detail, sizeof(detail),
                        "page:0x%lx offset:0x%lx grain:%ld",
                        page_frame_number, offset_in_page, grain);

                edac_ue_error(mci, error_count, pos, msg, location, label,
                              detail, other_detail, enable_per_layer_report);
        }
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
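
/*
 * Illustrative example (not part of the original file): reporting a single
 * corrected error whose csrow/channel position is known, with no syndrome
 * and no extra driver-specific detail. The position variables are
 * hypothetical:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *			     1,				// error_count
 *			     pfn, offset_in_page, 0,	// page, offset, syndrome
 *			     csrow, channel, -1,	// layer positions
 *			     "single-bit error", "");
 */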