Commit | Line | Data |
---|---|---|
da9bb1d2 AC |
1 | /* |
2 | * edac_mc kernel module | |
49c0dab7 | 3 | * (C) 2005, 2006 Linux Networx (http://lnxi.com) |
da9bb1d2 AC |
4 | * This file may be distributed under the terms of the |
5 | * GNU General Public License. | |
6 | * | |
7 | * Written by Thayne Harbaugh | |
8 | * Based on work by Dan Hollis <goemon at anime dot net> and others. | |
9 | * http://www.anime.net/~goemon/linux-ecc/ | |
10 | * | |
11 | * Modified by Dave Peterson and Doug Thompson | |
12 | * | |
13 | */ | |
14 | ||
da9bb1d2 AC |
15 | #include <linux/module.h> |
16 | #include <linux/proc_fs.h> | |
17 | #include <linux/kernel.h> | |
18 | #include <linux/types.h> | |
19 | #include <linux/smp.h> | |
20 | #include <linux/init.h> | |
21 | #include <linux/sysctl.h> | |
22 | #include <linux/highmem.h> | |
23 | #include <linux/timer.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/jiffies.h> | |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/list.h> | |
da9bb1d2 | 28 | #include <linux/ctype.h> |
c0d12172 | 29 | #include <linux/edac.h> |
53f2d028 | 30 | #include <linux/bitops.h> |
7c0f6ba6 | 31 | #include <linux/uaccess.h> |
da9bb1d2 | 32 | #include <asm/page.h> |
78d88e8a | 33 | #include "edac_mc.h" |
7c9281d7 | 34 | #include "edac_module.h" |
53f2d028 MCC |
35 | #include <ras/ras_event.h> |
36 | ||
b01aec9b BP |
37 | #ifdef CONFIG_EDAC_ATOMIC_SCRUB |
38 | #include <asm/edac.h> | |
39 | #else | |
40 | #define edac_atomic_scrub(va, size) do { } while (0) | |
41 | #endif | |
42 | ||
8c22b4fe BP |
43 | int edac_op_state = EDAC_OPSTATE_INVAL; |
44 | EXPORT_SYMBOL_GPL(edac_op_state); | |
45 | ||
fee27d7d BP |
46 | static int edac_report = EDAC_REPORTING_ENABLED; |
47 | ||
da9bb1d2 | 48 | /* lock to memory controller's control array */ |
63b7df91 | 49 | static DEFINE_MUTEX(mem_ctls_mutex); |
ff6ac2a6 | 50 | static LIST_HEAD(mc_devices); |
da9bb1d2 | 51 | |
80cc7d87 MCC |
52 | /* |
53 | * Used to lock EDAC MC to just one module, avoiding two drivers e. g. | |
54 | * apei/ghes and i7core_edac to be used at the same time. | |
55 | */ | |
3877c7d1 | 56 | static const char *edac_mc_owner; |
80cc7d87 | 57 | |
91b327f6 RR |
/* Map an embedded error descriptor back to the mci that owns it. */
static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
	return container_of(e, struct mem_ctl_info, error_desc);
}
62 | ||
bffc7dec | 63 | int edac_get_report_status(void) |
fee27d7d BP |
64 | { |
65 | return edac_report; | |
66 | } | |
bffc7dec | 67 | EXPORT_SYMBOL_GPL(edac_get_report_status); |
fee27d7d | 68 | |
bffc7dec | 69 | void edac_set_report_status(int new) |
fee27d7d BP |
70 | { |
71 | if (new == EDAC_REPORTING_ENABLED || | |
72 | new == EDAC_REPORTING_DISABLED || | |
73 | new == EDAC_REPORTING_FORCE) | |
74 | edac_report = new; | |
75 | } | |
bffc7dec | 76 | EXPORT_SYMBOL_GPL(edac_set_report_status); |
fee27d7d BP |
77 | |
78 | static int edac_report_set(const char *str, const struct kernel_param *kp) | |
79 | { | |
80 | if (!str) | |
81 | return -EINVAL; | |
82 | ||
83 | if (!strncmp(str, "on", 2)) | |
84 | edac_report = EDAC_REPORTING_ENABLED; | |
85 | else if (!strncmp(str, "off", 3)) | |
86 | edac_report = EDAC_REPORTING_DISABLED; | |
87 | else if (!strncmp(str, "force", 5)) | |
88 | edac_report = EDAC_REPORTING_FORCE; | |
89 | ||
90 | return 0; | |
91 | } | |
92 | ||
93 | static int edac_report_get(char *buffer, const struct kernel_param *kp) | |
94 | { | |
95 | int ret = 0; | |
96 | ||
97 | switch (edac_report) { | |
98 | case EDAC_REPORTING_ENABLED: | |
99 | ret = sprintf(buffer, "on"); | |
100 | break; | |
101 | case EDAC_REPORTING_DISABLED: | |
102 | ret = sprintf(buffer, "off"); | |
103 | break; | |
104 | case EDAC_REPORTING_FORCE: | |
105 | ret = sprintf(buffer, "force"); | |
106 | break; | |
107 | default: | |
108 | ret = -EINVAL; | |
109 | break; | |
110 | } | |
111 | ||
112 | return ret; | |
113 | } | |
114 | ||
/* Wire the getter/setter above to the writable "edac_report" parameter. */
static const struct kernel_param_ops edac_report_ops = {
	.set = edac_report_set,
	.get = edac_report_get,
};

module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
121 | ||
d55c79ac RR |
122 | unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf, |
123 | unsigned int len) | |
6e84d359 MCC |
124 | { |
125 | struct mem_ctl_info *mci = dimm->mci; | |
126 | int i, n, count = 0; | |
127 | char *p = buf; | |
128 | ||
129 | for (i = 0; i < mci->n_layers; i++) { | |
130 | n = snprintf(p, len, "%s %d ", | |
131 | edac_layer_name[mci->layers[i].type], | |
132 | dimm->location[i]); | |
133 | p += n; | |
134 | len -= n; | |
135 | count += n; | |
136 | if (!len) | |
137 | break; | |
138 | } | |
139 | ||
140 | return count; | |
141 | } | |
142 | ||
da9bb1d2 AC |
143 | #ifdef CONFIG_EDAC_DEBUG |
144 | ||
/*
 * Dump one rank/channel struct at debug level 4.
 * Compiled only under CONFIG_EDAC_DEBUG.
 */
static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, " channel = %p\n", chan);
	edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
152 | ||
c498afaf | 153 | static void edac_mc_dump_dimm(struct dimm_info *dimm) |
4275be63 | 154 | { |
6e84d359 MCC |
155 | char location[80]; |
156 | ||
c498afaf RR |
157 | if (!dimm->nr_pages) |
158 | return; | |
159 | ||
6e84d359 MCC |
160 | edac_dimm_info_location(dimm, location, sizeof(location)); |
161 | ||
162 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", | |
9713faec | 163 | dimm->mci->csbased ? "rank" : "dimm", |
c498afaf | 164 | dimm->idx, location, dimm->csrow, dimm->cschannel); |
6e84d359 MCC |
165 | edac_dbg(4, " dimm = %p\n", dimm); |
166 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); | |
167 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | |
168 | edac_dbg(4, " dimm->grain = %d\n", dimm->grain); | |
169 | edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); | |
da9bb1d2 AC |
170 | } |
171 | ||
/*
 * Dump one chip-select row's page range and pointers at debug level 4.
 * Compiled only under CONFIG_EDAC_DEBUG.
 */
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, " csrow = %p\n", csrow);
	edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}
183 | ||
/*
 * Dump a memory controller's capabilities and topology, mostly at debug
 * level 3 (the edac_check pointer at level 4).  CONFIG_EDAC_DEBUG only.
 */
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}
200 | ||
24f9a7fe BP |
201 | #endif /* CONFIG_EDAC_DEBUG */ |
202 | ||
/* Human-readable names for enum mem_type values, indexed by the enum. */
const char * const edac_mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS",
	[MEM_DDR2] = "Unbuffered-DDR2",
	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
	[MEM_RDDR2] = "Registered-DDR2",
	[MEM_XDR] = "XDR",
	[MEM_DDR3] = "Unbuffered-DDR3",
	[MEM_RDDR3] = "Registered-DDR3",
	[MEM_LRDDR3] = "Load-Reduced-DDR3-RAM",
	[MEM_DDR4] = "Unbuffered-DDR4",
	[MEM_RDDR4] = "Registered-DDR4",
	[MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
	[MEM_NVDIMM] = "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
228 | ||
93e4fe64 MCC |
/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. At
 *     return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_device_alloc_ctl_info() does it
 * above, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
	unsigned int align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	/*
	 * Fix: compute the misalignment of the offset being handed out
	 * (ptr), not of the address of the caller's cursor variable
	 * ((unsigned long)p), which is unrelated to the layout being built
	 * and made the returned offset effectively randomly aligned.
	 */
	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
283 | ||
faa2ad09 SR |
/*
 * Drop the final device reference; the actual freeing happens in
 * mci_release(), installed as mci->dev.release by edac_mc_alloc().
 */
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	put_device(&mci->dev);
}
288 | ||
/*
 * Device release callback: tears down everything edac_mc_alloc() and its
 * helpers built.  Frees every dimm struct and the dimms array, then each
 * csrow's channel structs, channel array and the csrow itself, and finally
 * the mci chunk (which also contains layers, counters and pvt_info).
 * NULL checks allow release after a partially failed allocation.
 */
static void mci_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct csrow_info *csr;
	int i, chn, row;

	if (mci->dimms) {
		for (i = 0; i < mci->tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < mci->nr_csrows; row++) {
			csr = mci->csrows[row];
			if (!csr)
				continue;

			if (csr->channels) {
				for (chn = 0; chn < mci->num_cschannel; chn++)
					kfree(csr->channels[chn]);
				kfree(csr->channels);
			}
			kfree(csr);
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}
318 | ||
aad28c6f RR |
/*
 * Allocate the legacy csrow/channel grid for @mci.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.  Partial
 * allocations are not unwound here; the caller releases them through
 * _edac_mc_free() -> mci_release().
 */
static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
	unsigned int tot_channels = mci->num_cschannel;
	unsigned int tot_csrows = mci->nr_csrows;
	unsigned int row, chn;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr;

		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;

		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			return -ENOMEM;

		for (chn = 0; chn < tot_channels; chn++) {
			struct rank_info *chan;

			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				return -ENOMEM;

			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	return 0;
}
363 | ||
/*
 * Allocate one dimm_info per DIMM and wire each into the legacy
 * csrow/channel grid.
 *
 * Returns 0 on success, -ENOMEM on failure; partial allocations are
 * released later via mci_release().  The (row, chn) walk order depends
 * on whether layer 0 is a virtual csrow, so that DIMMs map onto the
 * emulated csrow/channel coordinates in the expected order.
 */
static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
	unsigned int pos[EDAC_MAX_LAYERS];
	unsigned int row, chn, idx;
	int layer;
	void *p;

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		return -ENOMEM;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (idx = 0; idx < mci->tot_dimms; idx++) {
		struct dimm_info *dimm;
		struct rank_info *chan;
		int n, len;

		chan = mci->csrows[row]->channels[chn];

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			return -ENOMEM;
		mci->dimms[idx] = dimm;
		dimm->mci = mci;
		dimm->idx = idx;

		/*
		 * Build the label "mc#X" plus one "<layer>#<pos>" fragment
		 * per layer, stopping if the label buffer fills up.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mci->mc_idx);
		p += n;
		len -= n;
		for (layer = 0; layer < mci->n_layers; layer++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[mci->layers[layer].type],
				     pos[layer]);
			p += n;
			len -= n;
			dimm->location[layer] = pos[layer];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (mci->layers[0].is_virt_csrow) {
			chn++;
			if (chn == mci->num_cschannel) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == mci->nr_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location (odometer-style, last layer fastest) */
		for (layer = mci->n_layers - 1; layer >= 0; layer--) {
			pos[layer]++;
			if (pos[layer] < mci->layers[layer].size)
				break;
			pos[layer] = 0;
		}
	}

	return 0;
}
da9bb1d2 | 446 | |
1f27c790 RR |
/*
 * edac_mc_alloc - allocate and partially initialize a mem_ctl_info.
 * @mc_num:   index of this memory controller
 * @n_layers: number of entries in @layers (1..EDAC_MAX_LAYERS)
 * @layers:   description of each memory hierarchy layer
 * @sz_pvt:   size of the driver-private data to co-allocate
 *
 * The mci, its layer array, the per-layer CE/UE counters and the
 * driver-private area are carved out of one kzalloc()ed chunk:
 * edac_align_ptr() first computes aligned offsets relative to 0, then
 * the offsets are rebased onto the real allocation.  Returns NULL on
 * failure; partially built state is torn down via _edac_mc_free().
 */
struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
				   unsigned int n_layers,
				   struct edac_mc_layer *layers,
				   unsigned int sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned int idx, size, tot_dimms = 1, count = 1;
	unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *ptr = NULL;
	int i;
	bool per_rank = false;

	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
		return NULL;

	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (idx = 0; idx < n_layers; idx++) {
		tot_dimms *= layers[idx].size;

		if (layers[idx].is_virt_csrow)
			tot_csrows *= layers[idx].size;
		else
			tot_channels *= layers[idx].size;

		if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	mci->dev.release = mci_release;
	device_initialize(&mci->dev);

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	if (edac_mc_alloc_csrows(mci))
		goto error;

	if (edac_mc_alloc_dimms(mci))
		goto error;

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
549 | ||
da9bb1d2 AC |
/*
 * edac_mc_free - free an mci obtained from edac_mc_alloc().
 * Simply drops the device reference; mci_release() does the real work.
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
da9bb1d2 | 557 | |
d7fc9d77 YG |
558 | bool edac_has_mcs(void) |
559 | { | |
560 | bool ret; | |
561 | ||
562 | mutex_lock(&mem_ctls_mutex); | |
563 | ||
564 | ret = list_empty(&mc_devices); | |
565 | ||
566 | mutex_unlock(&mem_ctls_mutex); | |
567 | ||
568 | return !ret; | |
569 | } | |
570 | EXPORT_SYMBOL_GPL(edac_has_mcs); | |
571 | ||
c73e8833 BP |
572 | /* Caller must hold mem_ctls_mutex */ |
573 | static struct mem_ctl_info *__find_mci_by_dev(struct device *dev) | |
da9bb1d2 AC |
574 | { |
575 | struct mem_ctl_info *mci; | |
576 | struct list_head *item; | |
577 | ||
956b9ba1 | 578 | edac_dbg(3, "\n"); |
da9bb1d2 AC |
579 | |
580 | list_for_each(item, &mc_devices) { | |
581 | mci = list_entry(item, struct mem_ctl_info, link); | |
582 | ||
fd687502 | 583 | if (mci->pdev == dev) |
da9bb1d2 AC |
584 | return mci; |
585 | } | |
586 | ||
587 | return NULL; | |
588 | } | |
c73e8833 BP |
589 | |
590 | /** | |
591 | * find_mci_by_dev | |
592 | * | |
593 | * scan list of controllers looking for the one that manages | |
594 | * the 'dev' device | |
595 | * @dev: pointer to a struct device related with the MCI | |
596 | */ | |
597 | struct mem_ctl_info *find_mci_by_dev(struct device *dev) | |
598 | { | |
599 | struct mem_ctl_info *ret; | |
600 | ||
601 | mutex_lock(&mem_ctls_mutex); | |
602 | ret = __find_mci_by_dev(dev); | |
603 | mutex_unlock(&mem_ctls_mutex); | |
604 | ||
605 | return ret; | |
606 | } | |
939747bd | 607 | EXPORT_SYMBOL_GPL(find_mci_by_dev); |
da9bb1d2 | 608 | |
81d87cb1 DJ |
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 *
 * Runs mci->edac_check() under mem_ctls_mutex while the controller is in
 * polling mode, then re-queues itself.  Bails out without re-queuing once
 * the controller leaves OP_RUNNING_POLL (e.g. during teardown).
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* Controller no longer polled: do not re-arm the work item. */
	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
633 | ||
81d87cb1 | 634 | /* |
bce19683 DT |
635 | * edac_mc_reset_delay_period(unsigned long value) |
636 | * | |
637 | * user space has updated our poll period value, need to | |
638 | * reset our workq delays | |
81d87cb1 | 639 | */ |
9da21b15 | 640 | void edac_mc_reset_delay_period(unsigned long value) |
81d87cb1 | 641 | { |
bce19683 DT |
642 | struct mem_ctl_info *mci; |
643 | struct list_head *item; | |
644 | ||
645 | mutex_lock(&mem_ctls_mutex); | |
646 | ||
bce19683 DT |
647 | list_for_each(item, &mc_devices) { |
648 | mci = list_entry(item, struct mem_ctl_info, link); | |
649 | ||
fbedcaf4 NK |
650 | if (mci->op_state == OP_RUNNING_POLL) |
651 | edac_mod_work(&mci->work, value); | |
bce19683 | 652 | } |
81d87cb1 DJ |
653 | mutex_unlock(&mem_ctls_mutex); |
654 | } | |
655 | ||
bce19683 DT |
656 | |
657 | ||
2d7bbb91 DT |
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	/* Reject a second registration for the same underlying device. */
	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	/* Keep mc_devices sorted by ascending mc_idx; duplicates are bugs. */
	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
704 | ||
/*
 * Unlink @mci from the global list.  Returns nonzero when this was the
 * last registered controller, so the caller can drop EDAC_MC ownership.
 * Caller holds mem_ctls_mutex.
 */
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}
717 | ||
079708b9 | 718 | struct mem_ctl_info *edac_mc_find(int idx) |
5da0831c | 719 | { |
29a0c843 | 720 | struct mem_ctl_info *mci; |
5da0831c | 721 | struct list_head *item; |
c73e8833 BP |
722 | |
723 | mutex_lock(&mem_ctls_mutex); | |
5da0831c DT |
724 | |
725 | list_for_each(item, &mc_devices) { | |
726 | mci = list_entry(item, struct mem_ctl_info, link); | |
29a0c843 RR |
727 | if (mci->mc_idx == idx) |
728 | goto unlock; | |
5da0831c DT |
729 | } |
730 | ||
29a0c843 | 731 | mci = NULL; |
c73e8833 BP |
732 | unlock: |
733 | mutex_unlock(&mem_ctls_mutex); | |
734 | return mci; | |
5da0831c DT |
735 | } |
736 | EXPORT_SYMBOL(edac_mc_find); | |
737 | ||
3877c7d1 TK |
/* Return the module name currently owning the EDAC MC core (or NULL). */
const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);
da9bb1d2 AC |
743 | |
/* FIXME - should a warning be printed if no error detection? correction? */
/*
 * edac_mc_add_mc_with_groups - register an allocated mci with the core.
 * @mci:    controller to register (from edac_mc_alloc())
 * @groups: optional extra sysfs attribute groups
 *
 * Adds the controller to the global list, creates its sysfs device and
 * either starts the polling work (when mci->edac_check is set) or marks
 * it interrupt-driven.  Returns 0 on success, -EPERM when another module
 * already owns the MC core, -EINVAL on other failures.
 */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		struct dimm_info *dimm;
		int i;

		/* Only dump csrows/channels that have pages mapped. */
		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}

		mci_for_each_dimm(mci, dimm)
			edac_mc_dump_dimm(dimm);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	/* Only one EDAC module may drive memory controllers at a time. */
	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		mci->mod_name, mci->ctl_name, mci->dev_name,
		edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
da9bb1d2 | 828 | |
/*
 * edac_mc_del_mc - unregister the controller that manages @dev.
 * @dev: device the controller was registered for
 *
 * Marks the mci offline, unlinks it from the global list (releasing MC
 * core ownership if it was the last one), stops the polling work and
 * removes the sysfs device.  Returns the mci (caller frees it with
 * edac_mc_free()) or NULL if no controller manages @dev.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	/* Stop the poll work outside the mutex; the workq function bails
	 * out on OP_OFFLINE and will not re-arm itself. */
	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
da9bb1d2 | 865 | |
2da1c119 AB |
/*
 * Scrub (read-modify-write) the memory holding a corrected error so the
 * ECC word is rewritten clean.
 * @page:   PFN of the affected page
 * @offset: byte offset of the error within the page
 * @size:   number of bytes to scrub
 *
 * IRQs are disabled around the highmem mapping so the atomic kmap slot
 * cannot be lost to an interrupt on this CPU.
 */
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
896 | ||
da9bb1d2 | 897 | /* FIXME - should return -1 */ |
e7ecd891 | 898 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) |
da9bb1d2 | 899 | { |
de3910eb | 900 | struct csrow_info **csrows = mci->csrows; |
a895bf8b | 901 | int row, i, j, n; |
da9bb1d2 | 902 | |
956b9ba1 | 903 | edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); |
da9bb1d2 AC |
904 | row = -1; |
905 | ||
906 | for (i = 0; i < mci->nr_csrows; i++) { | |
de3910eb | 907 | struct csrow_info *csrow = csrows[i]; |
a895bf8b MCC |
908 | n = 0; |
909 | for (j = 0; j < csrow->nr_channels; j++) { | |
de3910eb | 910 | struct dimm_info *dimm = csrow->channels[j]->dimm; |
a895bf8b MCC |
911 | n += dimm->nr_pages; |
912 | } | |
913 | if (n == 0) | |
da9bb1d2 AC |
914 | continue; |
915 | ||
956b9ba1 JP |
916 | edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", |
917 | mci->mc_idx, | |
918 | csrow->first_page, page, csrow->last_page, | |
919 | csrow->page_mask); | |
da9bb1d2 AC |
920 | |
921 | if ((page >= csrow->first_page) && | |
922 | (page <= csrow->last_page) && | |
923 | ((page & csrow->page_mask) == | |
924 | (csrow->first_page & csrow->page_mask))) { | |
925 | row = i; | |
926 | break; | |
927 | } | |
928 | } | |
929 | ||
930 | if (row == -1) | |
537fba28 | 931 | edac_mc_printk(mci, KERN_ERR, |
052dfb45 DT |
932 | "could not look up page error address %lx\n", |
933 | (unsigned long)page); | |
da9bb1d2 AC |
934 | |
935 | return row; | |
936 | } | |
9110540f | 937 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); |
da9bb1d2 | 938 | |
4275be63 MCC |
939 | const char *edac_layer_name[] = { |
940 | [EDAC_MC_LAYER_BRANCH] = "branch", | |
941 | [EDAC_MC_LAYER_CHANNEL] = "channel", | |
942 | [EDAC_MC_LAYER_SLOT] = "slot", | |
943 | [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", | |
c66b5a79 | 944 | [EDAC_MC_LAYER_ALL_MEM] = "memory", |
4275be63 MCC |
945 | }; |
946 | EXPORT_SYMBOL_GPL(edac_layer_name); | |
947 | ||
6ab76179 | 948 | static void edac_inc_ce_error(struct edac_raw_error_desc *e) |
da9bb1d2 | 949 | { |
6ab76179 RR |
950 | int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; |
951 | struct mem_ctl_info *mci = error_desc_to_mci(e); | |
4275be63 | 952 | int i, index = 0; |
da9bb1d2 | 953 | |
6ab76179 | 954 | mci->ce_mc += e->error_count; |
da9bb1d2 | 955 | |
67792cf9 | 956 | if (pos[0] < 0) { |
6ab76179 | 957 | mci->ce_noinfo_count += e->error_count; |
da9bb1d2 AC |
958 | return; |
959 | } | |
e7ecd891 | 960 | |
4275be63 MCC |
961 | for (i = 0; i < mci->n_layers; i++) { |
962 | if (pos[i] < 0) | |
963 | break; | |
964 | index += pos[i]; | |
6ab76179 | 965 | mci->ce_per_layer[i][index] += e->error_count; |
4275be63 MCC |
966 | |
967 | if (i < mci->n_layers - 1) | |
968 | index *= mci->layers[i + 1].size; | |
969 | } | |
970 | } | |
971 | ||
6ab76179 | 972 | static void edac_inc_ue_error(struct edac_raw_error_desc *e) |
4275be63 | 973 | { |
6ab76179 RR |
974 | int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer }; |
975 | struct mem_ctl_info *mci = error_desc_to_mci(e); | |
4275be63 MCC |
976 | int i, index = 0; |
977 | ||
6ab76179 | 978 | mci->ue_mc += e->error_count; |
4275be63 | 979 | |
67792cf9 | 980 | if (pos[0] < 0) { |
6ab76179 | 981 | mci->ue_noinfo_count += e->error_count; |
da9bb1d2 AC |
982 | return; |
983 | } | |
984 | ||
4275be63 MCC |
985 | for (i = 0; i < mci->n_layers; i++) { |
986 | if (pos[i] < 0) | |
987 | break; | |
988 | index += pos[i]; | |
6ab76179 | 989 | mci->ue_per_layer[i][index] += e->error_count; |
a7d7d2e1 | 990 | |
4275be63 MCC |
991 | if (i < mci->n_layers - 1) |
992 | index *= mci->layers[i + 1].size; | |
993 | } | |
994 | } | |
da9bb1d2 | 995 | |
1853ee72 | 996 | static void edac_ce_error(struct edac_raw_error_desc *e) |
4275be63 | 997 | { |
6ab76179 | 998 | struct mem_ctl_info *mci = error_desc_to_mci(e); |
4275be63 MCC |
999 | unsigned long remapped_page; |
1000 | ||
1001 | if (edac_mc_get_log_ce()) { | |
1853ee72 RR |
1002 | edac_mc_printk(mci, KERN_WARNING, |
1003 | "%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n", | |
1004 | e->error_count, e->msg, | |
1005 | *e->msg ? " " : "", | |
1006 | e->label, e->location, e->page_frame_number, e->offset_in_page, | |
1007 | e->grain, e->syndrome, | |
1008 | *e->other_detail ? " - " : "", | |
1009 | e->other_detail); | |
4275be63 | 1010 | } |
6ab76179 RR |
1011 | |
1012 | edac_inc_ce_error(e); | |
da9bb1d2 | 1013 | |
aa2064d7 | 1014 | if (mci->scrub_mode == SCRUB_SW_SRC) { |
da9bb1d2 | 1015 | /* |
4275be63 MCC |
1016 | * Some memory controllers (called MCs below) can remap |
1017 | * memory so that it is still available at a different | |
1018 | * address when PCI devices map into memory. | |
1019 | * MC's that can't do this, lose the memory where PCI | |
1020 | * devices are mapped. This mapping is MC-dependent | |
1021 | * and so we call back into the MC driver for it to | |
1022 | * map the MC page to a physical (CPU) page which can | |
1023 | * then be mapped to a virtual page - which can then | |
1024 | * be scrubbed. | |
1025 | */ | |
da9bb1d2 | 1026 | remapped_page = mci->ctl_page_to_phys ? |
6ab76179 RR |
1027 | mci->ctl_page_to_phys(mci, e->page_frame_number) : |
1028 | e->page_frame_number; | |
da9bb1d2 | 1029 | |
6ab76179 | 1030 | edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain); |
da9bb1d2 AC |
1031 | } |
1032 | } | |
1033 | ||
1853ee72 | 1034 | static void edac_ue_error(struct edac_raw_error_desc *e) |
da9bb1d2 | 1035 | { |
6ab76179 | 1036 | struct mem_ctl_info *mci = error_desc_to_mci(e); |
f430d570 | 1037 | |
4275be63 | 1038 | if (edac_mc_get_log_ue()) { |
1853ee72 RR |
1039 | edac_mc_printk(mci, KERN_WARNING, |
1040 | "%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n", | |
1041 | e->error_count, e->msg, | |
1042 | *e->msg ? " " : "", | |
1043 | e->label, e->location, e->page_frame_number, e->offset_in_page, | |
1044 | e->grain, | |
1045 | *e->other_detail ? " - " : "", | |
1046 | e->other_detail); | |
4275be63 | 1047 | } |
e7ecd891 | 1048 | |
4275be63 | 1049 | if (edac_mc_get_panic_on_ue()) { |
1853ee72 RR |
1050 | panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n", |
1051 | e->msg, | |
1052 | *e->msg ? " " : "", | |
1053 | e->label, e->location, e->page_frame_number, e->offset_in_page, | |
1054 | e->grain, | |
1055 | *e->other_detail ? " - " : "", | |
1056 | e->other_detail); | |
4275be63 MCC |
1057 | } |
1058 | ||
6ab76179 | 1059 | edac_inc_ue_error(e); |
da9bb1d2 AC |
1060 | } |
1061 | ||
6334dc4e RR |
1062 | static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan) |
1063 | { | |
1064 | struct mem_ctl_info *mci = error_desc_to_mci(e); | |
1065 | enum hw_event_mc_err_type type = e->type; | |
1066 | u16 count = e->error_count; | |
1067 | ||
1068 | if (row < 0) | |
1069 | return; | |
1070 | ||
1071 | edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); | |
1072 | ||
1073 | if (type == HW_EVENT_ERR_CORRECTED) { | |
1074 | mci->csrows[row]->ce_count += count; | |
1075 | if (chan >= 0) | |
1076 | mci->csrows[row]->channels[chan]->ce_count += count; | |
1077 | } else { | |
1078 | mci->csrows[row]->ue_count += count; | |
1079 | } | |
1080 | } | |
1081 | ||
91b327f6 | 1082 | void edac_raw_mc_handle_error(struct edac_raw_error_desc *e) |
e7e24830 | 1083 | { |
91b327f6 | 1084 | struct mem_ctl_info *mci = error_desc_to_mci(e); |
787d8999 RR |
1085 | u8 grain_bits; |
1086 | ||
1087 | /* Sanity-check driver-supplied grain value. */ | |
1088 | if (WARN_ON_ONCE(!e->grain)) | |
1089 | e->grain = 1; | |
1090 | ||
1091 | grain_bits = fls_long(e->grain - 1); | |
1092 | ||
1093 | /* Report the error via the trace interface */ | |
1094 | if (IS_ENABLED(CONFIG_RAS)) | |
672ef0e5 | 1095 | trace_mc_event(e->type, e->msg, e->label, e->error_count, |
787d8999 RR |
1096 | mci->mc_idx, e->top_layer, e->mid_layer, |
1097 | e->low_layer, | |
1098 | (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, | |
1099 | grain_bits, e->syndrome, e->other_detail); | |
e7e24830 | 1100 | |
1853ee72 RR |
1101 | if (e->type == HW_EVENT_ERR_CORRECTED) |
1102 | edac_ce_error(e); | |
1103 | else | |
1104 | edac_ue_error(e); | |
e7e24830 MCC |
1105 | } |
1106 | EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error); | |
53f2d028 | 1107 | |
4275be63 MCC |
1108 | void edac_mc_handle_error(const enum hw_event_mc_err_type type, |
1109 | struct mem_ctl_info *mci, | |
9eb07a7f | 1110 | const u16 error_count, |
4275be63 MCC |
1111 | const unsigned long page_frame_number, |
1112 | const unsigned long offset_in_page, | |
1113 | const unsigned long syndrome, | |
53f2d028 MCC |
1114 | const int top_layer, |
1115 | const int mid_layer, | |
1116 | const int low_layer, | |
4275be63 | 1117 | const char *msg, |
03f7eae8 | 1118 | const char *other_detail) |
da9bb1d2 | 1119 | { |
c498afaf | 1120 | struct dimm_info *dimm; |
4275be63 MCC |
1121 | char *p; |
1122 | int row = -1, chan = -1; | |
53f2d028 | 1123 | int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; |
c7ef7645 | 1124 | int i, n_labels = 0; |
c7ef7645 | 1125 | struct edac_raw_error_desc *e = &mci->error_desc; |
67792cf9 | 1126 | bool any_memory = true; |
da9bb1d2 | 1127 | |
956b9ba1 | 1128 | edac_dbg(3, "MC%d\n", mci->mc_idx); |
da9bb1d2 | 1129 | |
c7ef7645 MCC |
1130 | /* Fills the error report buffer */ |
1131 | memset(e, 0, sizeof (*e)); | |
1132 | e->error_count = error_count; | |
672ef0e5 | 1133 | e->type = type; |
c7ef7645 MCC |
1134 | e->top_layer = top_layer; |
1135 | e->mid_layer = mid_layer; | |
1136 | e->low_layer = low_layer; | |
1137 | e->page_frame_number = page_frame_number; | |
1138 | e->offset_in_page = offset_in_page; | |
1139 | e->syndrome = syndrome; | |
1853ee72 RR |
1140 | /* need valid strings here for both: */ |
1141 | e->msg = msg ?: ""; | |
1142 | e->other_detail = other_detail ?: ""; | |
c7ef7645 | 1143 | |
4275be63 | 1144 | /* |
67792cf9 RR |
1145 | * Check if the event report is consistent and if the memory location is |
1146 | * known. If it is, the DIMM(s) label info will be filled and the | |
1147 | * per-layer error counters will be incremented. | |
4275be63 MCC |
1148 | */ |
1149 | for (i = 0; i < mci->n_layers; i++) { | |
1150 | if (pos[i] >= (int)mci->layers[i].size) { | |
4275be63 MCC |
1151 | |
1152 | edac_mc_printk(mci, KERN_ERR, | |
1153 | "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", | |
1154 | edac_layer_name[mci->layers[i].type], | |
1155 | pos[i], mci->layers[i].size); | |
1156 | /* | |
1157 | * Instead of just returning it, let's use what's | |
1158 | * known about the error. The increment routines and | |
1159 | * the DIMM filter logic will do the right thing by | |
1160 | * pointing the likely damaged DIMMs. | |
1161 | */ | |
1162 | pos[i] = -1; | |
1163 | } | |
1164 | if (pos[i] >= 0) | |
67792cf9 | 1165 | any_memory = false; |
da9bb1d2 AC |
1166 | } |
1167 | ||
4275be63 MCC |
1168 | /* |
1169 | * Get the dimm label/grain that applies to the match criteria. | |
1170 | * As the error algorithm may not be able to point to just one memory | |
1171 | * stick, the logic here will get all possible labels that could | |
1172 | * pottentially be affected by the error. | |
1173 | * On FB-DIMM memory controllers, for uncorrected errors, it is common | |
1174 | * to have only the MC channel and the MC dimm (also called "branch") | |
1175 | * but the channel is not known, as the memory is arranged in pairs, | |
1176 | * where each memory belongs to a separate channel within the same | |
1177 | * branch. | |
1178 | */ | |
c7ef7645 | 1179 | p = e->label; |
4275be63 | 1180 | *p = '\0'; |
4da1b7bf | 1181 | |
c498afaf | 1182 | mci_for_each_dimm(mci, dimm) { |
53f2d028 | 1183 | if (top_layer >= 0 && top_layer != dimm->location[0]) |
4275be63 | 1184 | continue; |
53f2d028 | 1185 | if (mid_layer >= 0 && mid_layer != dimm->location[1]) |
4275be63 | 1186 | continue; |
53f2d028 | 1187 | if (low_layer >= 0 && low_layer != dimm->location[2]) |
4275be63 | 1188 | continue; |
da9bb1d2 | 1189 | |
4275be63 | 1190 | /* get the max grain, over the error match range */ |
c7ef7645 MCC |
1191 | if (dimm->grain > e->grain) |
1192 | e->grain = dimm->grain; | |
9794f33d | 1193 | |
4275be63 MCC |
1194 | /* |
1195 | * If the error is memory-controller wide, there's no need to | |
67792cf9 RR |
1196 | * seek for the affected DIMMs because the whole channel/memory |
1197 | * controller/... may be affected. Also, don't show errors for | |
1198 | * empty DIMM slots. | |
4275be63 | 1199 | */ |
65bb4d1a | 1200 | if (!dimm->nr_pages) |
0d8292e0 | 1201 | continue; |
4275be63 | 1202 | |
0d8292e0 | 1203 | n_labels++; |
65bb4d1a RR |
1204 | if (n_labels > EDAC_MAX_LABELS) { |
1205 | p = e->label; | |
1206 | *p = '\0'; | |
1207 | } else { | |
1208 | if (p != e->label) { | |
1209 | strcpy(p, OTHER_LABEL); | |
1210 | p += strlen(OTHER_LABEL); | |
1211 | } | |
1212 | strcpy(p, dimm->label); | |
1213 | p += strlen(p); | |
4275be63 | 1214 | } |
0d8292e0 RR |
1215 | |
1216 | /* | |
1217 | * get csrow/channel of the DIMM, in order to allow | |
1218 | * incrementing the compat API counters | |
1219 | */ | |
1220 | edac_dbg(4, "%s csrows map: (%d,%d)\n", | |
1221 | mci->csbased ? "rank" : "dimm", | |
1222 | dimm->csrow, dimm->cschannel); | |
1223 | if (row == -1) | |
1224 | row = dimm->csrow; | |
1225 | else if (row >= 0 && row != dimm->csrow) | |
1226 | row = -2; | |
1227 | ||
1228 | if (chan == -1) | |
1229 | chan = dimm->cschannel; | |
1230 | else if (chan >= 0 && chan != dimm->cschannel) | |
1231 | chan = -2; | |
9794f33d | 1232 | } |
1233 | ||
67792cf9 | 1234 | if (any_memory) |
c7ef7645 | 1235 | strcpy(e->label, "any memory"); |
6334dc4e RR |
1236 | else if (!*e->label) |
1237 | strcpy(e->label, "unknown memory"); | |
1238 | ||
1239 | edac_inc_csrow(e, row, chan); | |
9794f33d | 1240 | |
4275be63 | 1241 | /* Fill the RAM location data */ |
c7ef7645 | 1242 | p = e->location; |
4da1b7bf | 1243 | |
4275be63 MCC |
1244 | for (i = 0; i < mci->n_layers; i++) { |
1245 | if (pos[i] < 0) | |
1246 | continue; | |
9794f33d | 1247 | |
4275be63 MCC |
1248 | p += sprintf(p, "%s:%d ", |
1249 | edac_layer_name[mci->layers[i].type], | |
1250 | pos[i]); | |
9794f33d | 1251 | } |
c7ef7645 | 1252 | if (p > e->location) |
53f2d028 MCC |
1253 | *(p - 1) = '\0'; |
1254 | ||
91b327f6 | 1255 | edac_raw_mc_handle_error(e); |
9794f33d | 1256 | } |
4275be63 | 1257 | EXPORT_SYMBOL_GPL(edac_mc_handle_error); |