/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (c) 2013 by Cisco Systems, Inc.
 * All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/ctype.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>

#include "edac_core.h"
#include "edac_module.h"

#define OCTEON_MAX_MC 4

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

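/*
 * Per-controller private state.  The fields below back the error-injection
 * sysfs attributes defined further down and are consumed by the Octeon II
 * poll handler when "inject" is set.
 */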
struct octeon_lmc_pvt {
	unsigned long inject;
	unsigned long error_type;
	unsigned long dimm;
	unsigned long rank;
	unsigned long bank;
	unsigned long row;
	unsigned long col;
};

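/*
 * Poll handler for CN3xxx/CN5xxx (Octeon Plus) parts: check the SEC/DED
 * status bits in LMC_MEM_CFG0, report any error with the fault address
 * from LMC_FADR, then write the flags back to clear (re-arm) them.
 */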
static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
{
	union cvmx_lmcx_mem_cfg0 cfg0;
	bool do_clear = false;
	char msg[64];

	cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
	if (cfg0.s.sec_err || cfg0.s.ded_err) {
		union cvmx_lmcx_fadr fadr;
		fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
		snprintf(msg, sizeof(msg),
			 "DIMM %d rank %d bank %d row %d col %d",
			 fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
			 fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
	}

	if (cfg0.s.sec_err) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		cfg0.s.sec_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (cfg0.s.ded_err) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		cfg0.s.ded_err = -1;	/* Done, re-arm */
		do_clear = true;
	}
	if (do_clear)
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
}

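/*
 * Poll handler for Octeon II parts: same idea as above, but the error bits
 * live in LMC_INT, and the software injection path (driven by the sysfs
 * attributes below) can substitute fake error and fault-address values.
 */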
static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
{
	struct octeon_lmc_pvt *pvt = mci->pvt_info;
	union cvmx_lmcx_int int_reg;
	bool do_clear = false;
	char msg[64];

	if (!pvt->inject)
		int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
	else {
		int_reg.u64 = 0;	/* Start from a clean value when injecting */
		if (pvt->error_type == 1)
			int_reg.s.sec_err = 1;
		if (pvt->error_type == 2)
			int_reg.s.ded_err = 1;
	}

	if (int_reg.s.sec_err || int_reg.s.ded_err) {
		union cvmx_lmcx_fadr fadr;
		if (likely(!pvt->inject))
			fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
		else {
			fadr.cn61xx.fdimm = pvt->dimm;
			fadr.cn61xx.fbunk = pvt->rank;
			fadr.cn61xx.fbank = pvt->bank;
			fadr.cn61xx.frow = pvt->row;
			fadr.cn61xx.fcol = pvt->col;
		}
		snprintf(msg, sizeof(msg),
			 "DIMM %d rank %d bank %d row %d col %d",
			 fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
			 fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
	}

	if (int_reg.s.sec_err) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		int_reg.s.sec_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (int_reg.s.ded_err) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, msg, "");
		int_reg.s.ded_err = -1;	/* Done, re-arm */
		do_clear = true;
	}

	if (do_clear) {
		if (likely(!pvt->inject))
			cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
		else
			pvt->inject = 0;
	}
}

/************************ MC SYSFS parts ***********************************/
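/*
 * Error-injection attributes (only acted on by the Octeon II poll handler).
 * Writing them fakes a SEC/DED error the next time octeon_lmc_edac_poll_o2()
 * runs.  A typical sequence might look like the following; the mcX index and
 * exact sysfs path depend on how the EDAC core registered this controller:
 *
 *	echo single > /sys/devices/system/edac/mc/mc0/error_type
 *	echo 0      > /sys/devices/system/edac/mc/mc0/dimm
 *	echo 1      > /sys/devices/system/edac/mc/mc0/inject
 */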

/* Only a couple naming differences per template, so very similar */
#define TEMPLATE_SHOW(reg)						\
static ssize_t octeon_mc_inject_##reg##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *data)		\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct octeon_lmc_pvt *pvt = mci->pvt_info;			\
	return sprintf(data, "%016llu\n", (u64)pvt->reg);		\
}

#define TEMPLATE_STORE(reg)						\
static ssize_t octeon_mc_inject_##reg##_store(struct device *dev,	\
					      struct device_attribute *attr, \
					      const char *data, size_t count) \
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct octeon_lmc_pvt *pvt = mci->pvt_info;			\
	if (isdigit(*data)) {						\
		if (!kstrtoul(data, 0, &pvt->reg))			\
			return count;					\
	}								\
	return 0;							\
}

TEMPLATE_SHOW(inject);
TEMPLATE_STORE(inject);
TEMPLATE_SHOW(dimm);
TEMPLATE_STORE(dimm);
TEMPLATE_SHOW(bank);
TEMPLATE_STORE(bank);
TEMPLATE_SHOW(rank);
TEMPLATE_STORE(rank);
TEMPLATE_SHOW(row);
TEMPLATE_STORE(row);
TEMPLATE_SHOW(col);
TEMPLATE_STORE(col);

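/* "single" injects a correctable (SEC) error, "double" an uncorrectable (DED) one. */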
static ssize_t octeon_mc_inject_error_type_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *data,
						 size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct octeon_lmc_pvt *pvt = mci->pvt_info;

	if (!strncmp(data, "single", 6))
		pvt->error_type = 1;
	else if (!strncmp(data, "double", 6))
		pvt->error_type = 2;

	return count;
}

static ssize_t octeon_mc_inject_error_type_show(struct device *dev,
						struct device_attribute *attr,
						char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct octeon_lmc_pvt *pvt = mci->pvt_info;
	if (pvt->error_type == 1)
		return sprintf(data, "single");
	else if (pvt->error_type == 2)
		return sprintf(data, "double");

	return 0;
}

static DEVICE_ATTR(inject, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_inject_show, octeon_mc_inject_inject_store);
static DEVICE_ATTR(error_type, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_error_type_show, octeon_mc_inject_error_type_store);
static DEVICE_ATTR(dimm, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_dimm_show, octeon_mc_inject_dimm_store);
static DEVICE_ATTR(rank, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_rank_show, octeon_mc_inject_rank_store);
static DEVICE_ATTR(bank, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_bank_show, octeon_mc_inject_bank_store);
static DEVICE_ATTR(row, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_row_show, octeon_mc_inject_row_store);
static DEVICE_ATTR(col, S_IRUGO | S_IWUSR,
		   octeon_mc_inject_col_show, octeon_mc_inject_col_store);

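/* Collected into an attribute group that is handed to edac_mc_add_mc_with_groups(). */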
static struct attribute *octeon_dev_attrs[] = {
	&dev_attr_inject.attr,
	&dev_attr_error_type.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_row.attr,
	&dev_attr_col.attr,
	NULL
};

ATTRIBUTE_GROUPS(octeon_dev);

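/*
 * Register one EDAC memory controller per LMC.  Bail out early if ECC is
 * not enabled, and mask the hardware SEC/DED interrupts since errors are
 * picked up by polling via ->edac_check.
 */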
static int octeon_lmc_edac_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[1];
	int mc = pdev->id;

	opstate_init();

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 1;
	layers[0].is_virt_csrow = false;

	if (OCTEON_IS_OCTEON1PLUS()) {
		union cvmx_lmcx_mem_cfg0 cfg0;

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
		if (!cfg0.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "octeon-lmc-err";
		mci->edac_check = octeon_lmc_edac_poll;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
		cfg0.s.intr_ded_ena = 0;	/* We poll */
		cfg0.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
	} else {
		/* OCTEON II */
		union cvmx_lmcx_int_en en;
		union cvmx_lmcx_config config;

		config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
		if (!config.s.ecc_ena) {
			dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
			return 0;
		}

		mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, sizeof(struct octeon_lmc_pvt));
		if (!mci)
			return -ENXIO;

		mci->pdev = &pdev->dev;
		mci->dev_name = dev_name(&pdev->dev);

		mci->mod_name = "octeon-lmc";
		mci->ctl_name = "co_lmc_err";
		mci->edac_check = octeon_lmc_edac_poll_o2;

		if (edac_mc_add_mc_with_groups(mci, octeon_dev_groups)) {
			dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
			edac_mc_free(mci);
			return -ENXIO;
		}

		en.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
		en.s.intr_ded_ena = 0;	/* We poll */
		en.s.intr_sec_ena = 0;
		cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), en.u64);
	}
	platform_set_drvdata(pdev, mci);

	return 0;
}

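/* Tear down in reverse order: unregister from the EDAC core, then free the MCI. */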
static int octeon_lmc_edac_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver octeon_lmc_edac_driver = {
	.probe = octeon_lmc_edac_probe,
	.remove = octeon_lmc_edac_remove,
	.driver = {
		   .name = "octeon_lmc_edac",
	}
};
module_platform_driver(octeon_lmc_edac_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");