EDAC, amd64: Reserve correct PCI devices on AMD Fam17h
[linux-2.6-block.git] drivers/edac/amd64_edac.c
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching
 * or higher' value.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select the DCT to which PCI cfg accesses are routed.
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
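/*
 * Illustrative example (added for clarity, not from the BKDG text): on
 * F10h, reading DCT1's copy of a DCT register through the helper below
 * simply adds 0x100 to the offset, e.g. DCT1's DRAM Cfg Low lives at
 * F2x190 instead of F2x090.
 */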
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using
		 * F1x10C[DctCfgSel].
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * Map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal to the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
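	/*
	 * Worked example (hypothetical request, added for clarity): for
	 * new_bw = 500000000 bytes/s the loop below passes the 1.6GB/s and
	 * 800MB/s entries and stops at 400MB/s, i.e. scrubval 0x03 - the
	 * closest rate not above the request (assuming min_rate does not
	 * exclude those entries).
	 */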
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
	} else
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
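	/*
	 * e.g. (added for clarity) a sign-extended sys_addr of
	 * 0xffffff8012345678 is truncated to 0x8012345678 below before the
	 * base/limit comparison.
	 */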
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow];
		base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift = 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift = 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, " revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

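	/*
	 * Worked example (hypothetical values, added for clarity): with a
	 * hole base x = 0xe0000000, the hole spans [0xe0000000, 0xffffffff]
	 * and the relocated range is [0x100000000, 0x11fffffff]; hole_size
	 * below evaluates to 0x100000000 - 0xe0000000 = 0x20000000 (512MB).
	 */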
	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
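/*
 * Illustrative walk-through of the steps above (hypothetical values, added
 * for clarity): with a DHAR hole base of 0xe0000000, the relocated region
 * is [0x100000000, 0x11fffffff]. A sys_addr of 0x110000000 falls inside it,
 * so step 2 applies and the DramAddr is sys_addr minus the DramHoleOffset;
 * step 3 (subtracting DRAMBase) is then skipped.
 */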
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

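/*
 * Example (added for clarity): intlv_en = 0x7 means 3 node-interleave bits,
 * i.e. SysAddr bits [14:12] pick one of 8 nodes; those bits are squeezed
 * back out in dram_addr_to_input_addr() below.
 */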
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, " PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, " DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, " NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0 = DCSB0 + (cs * 4);
		int reg1 = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0 = DCSM0 + (cs * 4);
		int reg1 = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}

static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

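/*
 * Note (added for clarity): pci_get_related_function() above returns the
 * (vendor, device) match sitting in the same PCI domain/bus/slot as
 * @related; read_dram_base_limit_regs() below uses it to find the F1
 * function that shares a slot with a node's northbridge misc device.
 */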
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
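		/*
		 * Worked example (added for clarity): cs_mode = 10 gives
		 * diff = 10/3 + (10 > 5) = 4 and 32 << (10 - 4) = 2048MB;
		 * cs_mode = 5 gives diff = 1 and 32 << 4 = 512MB - both
		 * matching the table above.
		 */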
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for (i = 0; i < NUM_UMCS; i++)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15hM60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];

	/* Each mask is used for every two base addresses. */
	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);

	/* Return size in MBs. */
	return size >> 10;
}
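
/*
 * Illustrative example for f17_base_addr_to_cs_size() above (hypothetical
 * register values, added for clarity): base_addr = 0 and addr_mask =
 * 0x00ffffff give ((0x00ffffff >> 1) - 0 + 1) >> 1 = 0x400000 kB, which is
 * returned as 0x400000 >> 10 = 4096MB.
 */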

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, " DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, " Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, " channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base = get_dram_base(pvt, range);
	u64 hole_off = f10_dhar_offset(pvt);
	u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

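	/*
	 * Added for clarity: keep sys_addr bits [47:6] and subtract the
	 * offset's bits [47:23] - offsets apply at an 8MB (2^23) granularity.
	 */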
	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL: NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
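/*
 * Note (added for clarity): in the function below swap_base, swap_limit and
 * rgn_size are compared against sys_addr >> 27, i.e. they are expressed in
 * 128MB granules, and the !(sys_addr >> 34) test restricts the swap logic
 * to addresses below 16GB.
 */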
b15f0fca 1604static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
95b0ef55
BP
1605{
1606 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1607
a4b4bedc 1608 if (pvt->fam == 0x10) {
95b0ef55 1609 /* only revC3 and revE have that feature */
a4b4bedc 1610 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
95b0ef55
BP
1611 return sys_addr;
1612 }
1613
7981a28f 1614 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
95b0ef55
BP
1615
1616 if (!(swap_reg & 0x1))
1617 return sys_addr;
1618
1619 swap_base = (swap_reg >> 3) & 0x7f;
1620 swap_limit = (swap_reg >> 11) & 0x7f;
1621 rgn_size = (swap_reg >> 20) & 0x7f;
1622 tmp_addr = sys_addr >> 27;
1623
1624 if (!(sys_addr >> 34) &&
1625 (((tmp_addr >= swap_base) &&
1626 (tmp_addr <= swap_limit)) ||
1627 (tmp_addr < rgn_size)))
1628 return sys_addr ^ (u64)swap_base << 27;
1629
1630 return sys_addr;
1631}
1632
f71d0a05 1633/* For a given @dram_range, check if @sys_addr falls within it. */
e761359a 1634static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
33ca0643 1635 u64 sys_addr, int *chan_sel)
f71d0a05 1636{
229a7a11 1637 int cs_found = -EINVAL;
c8e518d5 1638 u64 chan_addr;
5d4b58e8 1639 u32 dct_sel_base;
11c75ead 1640 u8 channel;
229a7a11 1641 bool high_range = false;
f71d0a05 1642
7f19bf75 1643 u8 node_id = dram_dst_node(pvt, range);
229a7a11 1644 u8 intlv_en = dram_intlv_en(pvt, range);
7f19bf75 1645 u32 intlv_sel = dram_intlv_sel(pvt, range);
f71d0a05 1646
956b9ba1
JP
1647 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1648 range, sys_addr, get_dram_limit(pvt, range));
f71d0a05 1649
355fba60
BP
1650 if (dhar_valid(pvt) &&
1651 dhar_base(pvt) <= sys_addr &&
1652 sys_addr < BIT_64(32)) {
1653 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1654 sys_addr);
1655 return -EINVAL;
1656 }
1657
f030ddfb 1658 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
f71d0a05
DT
1659 return -EINVAL;
1660
b15f0fca 1661 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
95b0ef55 1662
f71d0a05
DT
1663 dct_sel_base = dct_sel_baseaddr(pvt);
1664
1665 /*
1666 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1667 * select between DCT0 and DCT1.
1668 */
1669 if (dct_high_range_enabled(pvt) &&
1670 !dct_ganging_enabled(pvt) &&
1671 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
229a7a11 1672 high_range = true;
f71d0a05 1673
b15f0fca 1674 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
f71d0a05 1675
b15f0fca 1676 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
c8e518d5 1677 high_range, dct_sel_base);
f71d0a05 1678
e2f79dbd
BP
1679 /* Remove node interleaving, see F1x120 */
1680 if (intlv_en)
1681 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1682 (chan_addr & 0xfff);
f71d0a05 1683
5d4b58e8 1684 /* remove channel interleave */
f71d0a05
DT
1685 if (dct_interleave_enabled(pvt) &&
1686 !dct_high_range_enabled(pvt) &&
1687 !dct_ganging_enabled(pvt)) {
5d4b58e8
BP
1688
1689 if (dct_sel_interleave_addr(pvt) != 1) {
1690 if (dct_sel_interleave_addr(pvt) == 0x3)
1691 /* hash 9 */
1692 chan_addr = ((chan_addr >> 10) << 9) |
1693 (chan_addr & 0x1ff);
1694 else
1695 /* A[6] or hash 6 */
1696 chan_addr = ((chan_addr >> 7) << 6) |
1697 (chan_addr & 0x3f);
1698 } else
1699 /* A[12] */
1700 chan_addr = ((chan_addr >> 13) << 12) |
1701 (chan_addr & 0xfff);
f71d0a05
DT
1702 }
1703
956b9ba1 1704 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
f71d0a05 1705
b15f0fca 1706 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
f71d0a05 1707
33ca0643 1708 if (cs_found >= 0)
f71d0a05 1709 *chan_sel = channel;
33ca0643 1710
f71d0a05
DT
1711 return cs_found;
1712}
1713
18b94f66
AG
1714static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1715 u64 sys_addr, int *chan_sel)
1716{
1717 int cs_found = -EINVAL;
1718 int num_dcts_intlv = 0;
1719 u64 chan_addr, chan_offset;
1720 u64 dct_base, dct_limit;
1721 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1722 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1723
1724 u64 dhar_offset = f10_dhar_offset(pvt);
1725 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1726 u8 node_id = dram_dst_node(pvt, range);
1727 u8 intlv_en = dram_intlv_en(pvt, range);
1728
1729 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1730 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1731
1732 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1733 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1734
1735 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1736 range, sys_addr, get_dram_limit(pvt, range));
1737
1738 if (!(get_dram_base(pvt, range) <= sys_addr) &&
1739 !(get_dram_limit(pvt, range) >= sys_addr))
1740 return -EINVAL;
1741
1742 if (dhar_valid(pvt) &&
1743 dhar_base(pvt) <= sys_addr &&
1744 sys_addr < BIT_64(32)) {
1745 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1746 sys_addr);
1747 return -EINVAL;
1748 }
1749
1750 /* Verify sys_addr is within DCT Range. */
4fc06b31
AG
1751 dct_base = (u64) dct_sel_baseaddr(pvt);
1752 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
18b94f66
AG
1753
1754 if (!(dct_cont_base_reg & BIT(0)) &&
4fc06b31
AG
1755 !(dct_base <= (sys_addr >> 27) &&
1756 dct_limit >= (sys_addr >> 27)))
18b94f66
AG
1757 return -EINVAL;
1758
 1759 /* Verify the number of DCTs that participate in channel interleaving. */
1760 num_dcts_intlv = (int) hweight8(intlv_en);
1761
1762 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1763 return -EINVAL;
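	/*
	 * Editor's note: hweight8() counts the participating DCTs, so only
	 * 0 (no interleaving), 2 or 4 get past the check above.
	 */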
1764
dc0a50a8
YG
1765 if (pvt->model >= 0x60)
1766 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
1767 else
1768 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1769 num_dcts_intlv, dct_sel);
18b94f66
AG
1770
1771 /* Verify we stay within the MAX number of channels allowed */
7f3f5240 1772 if (channel > 3)
18b94f66
AG
1773 return -EINVAL;
1774
1775 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1776
1777 /* Get normalized DCT addr */
1778 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1779 chan_offset = dhar_offset;
1780 else
4fc06b31 1781 chan_offset = dct_base << 27;
18b94f66
AG
1782
1783 chan_addr = sys_addr - chan_offset;
1784
1785 /* remove channel interleave */
1786 if (num_dcts_intlv == 2) {
1787 if (intlv_addr == 0x4)
1788 chan_addr = ((chan_addr >> 9) << 8) |
1789 (chan_addr & 0xff);
1790 else if (intlv_addr == 0x5)
1791 chan_addr = ((chan_addr >> 10) << 9) |
1792 (chan_addr & 0x1ff);
1793 else
1794 return -EINVAL;
1795
1796 } else if (num_dcts_intlv == 4) {
1797 if (intlv_addr == 0x4)
1798 chan_addr = ((chan_addr >> 10) << 8) |
1799 (chan_addr & 0xff);
1800 else if (intlv_addr == 0x5)
1801 chan_addr = ((chan_addr >> 11) << 9) |
1802 (chan_addr & 0x1ff);
1803 else
1804 return -EINVAL;
1805 }
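	/*
	 * Editor's note (worked example): for num_dcts_intlv == 4 and
	 * intlv_addr == 0x4, two bits ([9:8]) are squeezed out:
	 * ((0x7ff >> 10) << 8) | (0x7ff & 0xff) == 0x1ff.
	 */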
1806
1807 if (dct_offset_en) {
1808 amd64_read_pci_cfg(pvt->F1,
1809 DRAM_CONT_HIGH_OFF + (int) channel * 4,
1810 &tmp);
4fc06b31 1811 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
18b94f66
AG
1812 }
1813
1814 f15h_select_dct(pvt, channel);
1815
1816 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1817
1818 /*
1819 * Find Chip select:
1820 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
 1821 * there is support for 4 DCTs, but only 2 are currently functional.
1822 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1823 * pvt->csels[1]. So we need to use '1' here to get correct info.
1824 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1825 */
1826 alias_channel = (channel == 3) ? 1 : channel;
1827
1828 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1829
1830 if (cs_found >= 0)
1831 *chan_sel = alias_channel;
1832
1833 return cs_found;
1834}
1835
1836static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1837 u64 sys_addr,
1838 int *chan_sel)
f71d0a05 1839{
e761359a
BP
1840 int cs_found = -EINVAL;
1841 unsigned range;
f71d0a05 1842
7f19bf75 1843 for (range = 0; range < DRAM_RANGES; range++) {
7f19bf75 1844 if (!dram_rw(pvt, range))
f71d0a05
DT
1845 continue;
1846
18b94f66
AG
1847 if (pvt->fam == 0x15 && pvt->model >= 0x30)
1848 cs_found = f15_m30h_match_to_this_node(pvt, range,
1849 sys_addr,
1850 chan_sel);
f71d0a05 1851
18b94f66
AG
1852 else if ((get_dram_base(pvt, range) <= sys_addr) &&
1853 (get_dram_limit(pvt, range) >= sys_addr)) {
b15f0fca 1854 cs_found = f1x_match_to_this_node(pvt, range,
33ca0643 1855 sys_addr, chan_sel);
f71d0a05
DT
1856 if (cs_found >= 0)
1857 break;
1858 }
1859 }
1860 return cs_found;
1861}
1862
1863/*
bdc30a0c
BP
1864 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1865 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
f71d0a05 1866 *
bdc30a0c
BP
1867 * The @sys_addr is usually an error address received from the hardware
1868 * (MCX_ADDR).
f71d0a05 1869 */
b15f0fca 1870static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
33ca0643 1871 struct err_info *err)
f71d0a05
DT
1872{
1873 struct amd64_pvt *pvt = mci->pvt_info;
f71d0a05 1874
33ca0643 1875 error_address_to_page_and_offset(sys_addr, err);
ab5a503c 1876
33ca0643
BP
1877 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1878 if (err->csrow < 0) {
1879 err->err_code = ERR_CSROW;
bdc30a0c
BP
1880 return;
1881 }
1882
bdc30a0c
BP
1883 /*
1884 * We need the syndromes for channel detection only when we're
 1885 * ganged. Otherwise err->channel should already contain the channel at
1886 * this point.
1887 */
a97fa68e 1888 if (dct_ganging_enabled(pvt))
33ca0643 1889 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
f71d0a05
DT
1890}
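/*
 * Editor's sketch (illustrative): decode_bus_error() further down drives
 * this mapper roughly as follows:
 *
 *	memset(&err, 0, sizeof(err));
 *	if (ecc_type == 2)
 *		err.syndrome = extract_syndrome(m->status);
 *
 *	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
 *
 * On success, err.csrow and err.channel identify the failing chip select.
 */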
1891
f71d0a05 1892/*
8566c4df 1893 * debug routine to display the memory sizes of all logical DIMMs and their
cb328507 1894 * CSROWs
f71d0a05 1895 */
d1ea71cd 1896static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
f71d0a05 1897{
bb89f5a0 1898 int dimm, size0, size1;
525a1b20
BP
1899 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1900 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
f71d0a05 1901
a4b4bedc 1902 if (pvt->fam == 0xf) {
8566c4df 1903 /* K8 families < revF not supported yet */
1433eb99 1904 if (pvt->ext_model < K8_REV_F)
8566c4df
BP
1905 return;
1906 else
1907 WARN_ON(ctrl != 0);
1908 }
1909
7981a28f
AG
1910 if (pvt->fam == 0x10) {
1911 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1912 : pvt->dbam0;
1913 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1914 pvt->csels[1].csbases :
1915 pvt->csels[0].csbases;
1916 } else if (ctrl) {
1917 dbam = pvt->dbam0;
1918 dcsb = pvt->csels[1].csbases;
1919 }
956b9ba1
JP
1920 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1921 ctrl, dbam);
f71d0a05 1922
8566c4df
BP
1923 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1924
f71d0a05
DT
1925 /* Dump memory sizes for DIMM and its CSROWs */
1926 for (dimm = 0; dimm < 4; dimm++) {
1927
1928 size0 = 0;
11c75ead 1929 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
a597d2a5
AG
1930 /* For f15m60h, need multiplier for LRDIMM cs_size
1931 * calculation. We pass 'dimm' value to the dbam_to_cs
1932 * mapper so we can find the multiplier from the
1933 * corresponding DCSM.
1934 */
41d8bfab 1935 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
a597d2a5
AG
1936 DBAM_DIMM(dimm, dbam),
1937 dimm);
f71d0a05
DT
1938
1939 size1 = 0;
11c75ead 1940 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
41d8bfab 1941 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
a597d2a5
AG
1942 DBAM_DIMM(dimm, dbam),
1943 dimm);
f71d0a05 1944
24f9a7fe 1945 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
bb89f5a0
BP
1946 dimm * 2, size0,
1947 dimm * 2 + 1, size1);
f71d0a05
DT
1948 }
1949}
1950
d1ea71cd 1951static struct amd64_family_type family_types[] = {
4d37607a 1952 [K8_CPUS] = {
0092b20d 1953 .ctl_name = "K8",
8d5b5d9c 1954 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
3f37a36b 1955 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
4d37607a 1956 .ops = {
1433eb99 1957 .early_channel_count = k8_early_channel_count,
1433eb99
BP
1958 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1959 .dbam_to_cs = k8_dbam_to_chip_select,
4d37607a
DT
1960 }
1961 },
1962 [F10_CPUS] = {
0092b20d 1963 .ctl_name = "F10h",
8d5b5d9c 1964 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
3f37a36b 1965 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
4d37607a 1966 .ops = {
7d20d14d 1967 .early_channel_count = f1x_early_channel_count,
b15f0fca 1968 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1433eb99 1969 .dbam_to_cs = f10_dbam_to_chip_select,
b2b0c605
BP
1970 }
1971 },
1972 [F15_CPUS] = {
1973 .ctl_name = "F15h",
df71a053 1974 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
3f37a36b 1975 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
b2b0c605 1976 .ops = {
7d20d14d 1977 .early_channel_count = f1x_early_channel_count,
b15f0fca 1978 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
41d8bfab 1979 .dbam_to_cs = f15_dbam_to_chip_select,
4d37607a
DT
1980 }
1981 },
18b94f66
AG
1982 [F15_M30H_CPUS] = {
1983 .ctl_name = "F15h_M30h",
1984 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
3f37a36b 1985 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
18b94f66
AG
1986 .ops = {
1987 .early_channel_count = f1x_early_channel_count,
1988 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1989 .dbam_to_cs = f16_dbam_to_chip_select,
18b94f66
AG
1990 }
1991 },
a597d2a5
AG
1992 [F15_M60H_CPUS] = {
1993 .ctl_name = "F15h_M60h",
1994 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
3f37a36b 1995 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
a597d2a5
AG
1996 .ops = {
1997 .early_channel_count = f1x_early_channel_count,
1998 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1999 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2000 }
2001 },
94c1acf2
AG
2002 [F16_CPUS] = {
2003 .ctl_name = "F16h",
2004 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
3f37a36b 2005 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
94c1acf2
AG
2006 .ops = {
2007 .early_channel_count = f1x_early_channel_count,
2008 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2009 .dbam_to_cs = f16_dbam_to_chip_select,
94c1acf2
AG
2010 }
2011 },
85a8885b
AG
2012 [F16_M30H_CPUS] = {
2013 .ctl_name = "F16h_M30h",
2014 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
3f37a36b 2015 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
85a8885b
AG
2016 .ops = {
2017 .early_channel_count = f1x_early_channel_count,
2018 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2019 .dbam_to_cs = f16_dbam_to_chip_select,
85a8885b
AG
2020 }
2021 },
f1cbbec9
YG
2022 [F17_CPUS] = {
2023 .ctl_name = "F17h",
2024 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2025 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2026 .ops = {
2027 .early_channel_count = f17_early_channel_count,
2028 .dbam_to_cs = f17_base_addr_to_cs_size,
2029 }
2030 },
4d37607a
DT
2031};
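/*
 * Editor's note: per_family_init() below points pvt->ops at one of the
 * entries above, so generic code stays family-agnostic, e.g.
 * pvt->ops->dbam_to_cs(pvt, dct, cs_mode, dimm).
 */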
2032
b1289d6f 2033/*
bfc04aec
BP
2034 * These are tables of eigenvectors (one per line) which can be used for the
2035 * construction of the syndrome tables. The modified syndrome search algorithm
 2036 * uses them to find the symbol in error and thus the DIMM.
b1289d6f 2037 *
bfc04aec 2038 * Algorithm courtesy of Ross LaFetra from AMD.
b1289d6f 2039 */
c7e5301a 2040static const u16 x4_vectors[] = {
bfc04aec
BP
2041 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2042 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2043 0x0001, 0x0002, 0x0004, 0x0008,
2044 0x1013, 0x3032, 0x4044, 0x8088,
2045 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2046 0x4857, 0xc4fe, 0x13cc, 0x3288,
2047 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2048 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2049 0x15c1, 0x2a42, 0x89ac, 0x4758,
2050 0x2b03, 0x1602, 0x4f0c, 0xca08,
2051 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2052 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2053 0x2b87, 0x164e, 0x642c, 0xdc18,
2054 0x40b9, 0x80de, 0x1094, 0x20e8,
2055 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2056 0x11c1, 0x2242, 0x84ac, 0x4c58,
2057 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2058 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2059 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2060 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2061 0x16b3, 0x3d62, 0x4f34, 0x8518,
2062 0x1e2f, 0x391a, 0x5cac, 0xf858,
2063 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2064 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2065 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2066 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2067 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2068 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2069 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2070 0x185d, 0x2ca6, 0x7914, 0x9e28,
2071 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2072 0x4199, 0x82ee, 0x19f4, 0x2e58,
2073 0x4807, 0xc40e, 0x130c, 0x3208,
2074 0x1905, 0x2e0a, 0x5804, 0xac08,
2075 0x213f, 0x132a, 0xadfc, 0x5ba8,
2076 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
b1289d6f
DT
2077};
2078
c7e5301a 2079static const u16 x8_vectors[] = {
bfc04aec
BP
2080 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2081 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2082 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2083 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2084 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2085 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2086 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2087 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2088 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2089 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2090 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2091 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2092 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2093 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2094 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2095 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2096 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2097 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2098 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2099};
2100
c7e5301a 2101static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
d34a6ecd 2102 unsigned v_dim)
b1289d6f 2103{
bfc04aec
BP
2104 unsigned int i, err_sym;
2105
2106 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2107 u16 s = syndrome;
d34a6ecd
BP
2108 unsigned v_idx = err_sym * v_dim;
2109 unsigned v_end = (err_sym + 1) * v_dim;
bfc04aec
BP
2110
2111 /* walk over all 16 bits of the syndrome */
2112 for (i = 1; i < (1U << 16); i <<= 1) {
2113
2114 /* if bit is set in that eigenvector... */
2115 if (v_idx < v_end && vectors[v_idx] & i) {
2116 u16 ev_comp = vectors[v_idx++];
2117
2118 /* ... and bit set in the modified syndrome, */
2119 if (s & i) {
2120 /* remove it. */
2121 s ^= ev_comp;
4d37607a 2122
bfc04aec
BP
2123 if (!s)
2124 return err_sym;
2125 }
b1289d6f 2126
bfc04aec
BP
2127 } else if (s & i)
2128 /* can't get to zero, move to next symbol */
2129 break;
2130 }
b1289d6f
DT
2131 }
2132
956b9ba1 2133 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
b1289d6f
DT
2134 return -1;
2135}
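/*
 * Editor's sketch (illustrative, not part of the driver): decode_syndrome()
 * above greedily cancels the syndrome against one symbol's v_dim
 * eigenvectors, walking syndrome bits from LSB to MSB. The underlying idea,
 * with a made-up helper name: err_sym matches iff some non-empty subset of
 * its vectors XORs to the syndrome. A brute-force check (v_dim is 4 or 8
 * here, so at most 256 subsets):
 */
static bool syndrome_matches_symbol(u16 syndrome, const u16 *vecs,
				    unsigned int v_dim)
{
	unsigned int subset, i;

	for (subset = 1; subset < (1U << v_dim); subset++) {
		u16 acc = 0;

		for (i = 0; i < v_dim; i++)
			if (subset & (1U << i))
				acc ^= vecs[i];

		if (acc == syndrome)
			return true;
	}
	return false;
}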
d27bf6fa 2136
bfc04aec
BP
2137static int map_err_sym_to_channel(int err_sym, int sym_size)
2138{
2139 if (sym_size == 4)
2140 switch (err_sym) {
2141 case 0x20:
2142 case 0x21:
2143 return 0;
2144 break;
2145 case 0x22:
2146 case 0x23:
2147 return 1;
2148 break;
2149 default:
2150 return err_sym >> 4;
2151 break;
2152 }
2153 /* x8 symbols */
2154 else
2155 switch (err_sym) {
2156 /* imaginary bits not in a DIMM */
2157 case 0x10:
2158 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2159 err_sym);
2160 return -1;
2161 break;
2162
2163 case 0x11:
2164 return 0;
2165 break;
2166 case 0x12:
2167 return 1;
2168 break;
2169 default:
2170 return err_sym >> 3;
2171 break;
2172 }
2173 return -1;
2174}
2175
2176static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2177{
2178 struct amd64_pvt *pvt = mci->pvt_info;
ad6a32e9
BP
2179 int err_sym = -1;
2180
a3b7db09 2181 if (pvt->ecc_sym_sz == 8)
ad6a32e9
BP
2182 err_sym = decode_syndrome(syndrome, x8_vectors,
2183 ARRAY_SIZE(x8_vectors),
a3b7db09
BP
2184 pvt->ecc_sym_sz);
2185 else if (pvt->ecc_sym_sz == 4)
ad6a32e9
BP
2186 err_sym = decode_syndrome(syndrome, x4_vectors,
2187 ARRAY_SIZE(x4_vectors),
a3b7db09 2188 pvt->ecc_sym_sz);
ad6a32e9 2189 else {
a3b7db09 2190 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
ad6a32e9 2191 return err_sym;
bfc04aec 2192 }
ad6a32e9 2193
a3b7db09 2194 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
bfc04aec
BP
2195}
2196
e70984d9 2197static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
33ca0643 2198 u8 ecc_type)
d27bf6fa 2199{
33ca0643
BP
2200 enum hw_event_mc_err_type err_type;
2201 const char *string;
d27bf6fa 2202
33ca0643
BP
2203 if (ecc_type == 2)
2204 err_type = HW_EVENT_ERR_CORRECTED;
2205 else if (ecc_type == 1)
2206 err_type = HW_EVENT_ERR_UNCORRECTED;
d12a969e
YG
2207 else if (ecc_type == 3)
2208 err_type = HW_EVENT_ERR_DEFERRED;
33ca0643
BP
2209 else {
2210 WARN(1, "Something is rotten in the state of Denmark.\n");
d27bf6fa
DT
2211 return;
2212 }
2213
33ca0643
BP
2214 switch (err->err_code) {
2215 case DECODE_OK:
2216 string = "";
2217 break;
2218 case ERR_NODE:
2219 string = "Failed to map error addr to a node";
2220 break;
2221 case ERR_CSROW:
2222 string = "Failed to map error addr to a csrow";
2223 break;
2224 case ERR_CHANNEL:
2225 string = "unknown syndrome - possible error reporting race";
2226 break;
2227 default:
2228 string = "WTF error";
2229 break;
d27bf6fa 2230 }
33ca0643
BP
2231
2232 edac_mc_handle_error(err_type, mci, 1,
2233 err->page, err->offset, err->syndrome,
2234 err->csrow, err->channel, -1,
2235 string, "");
d27bf6fa
DT
2236}
2237
df781d03 2238static inline void decode_bus_error(int node_id, struct mce *m)
d27bf6fa 2239{
0c510cc8
DB
2240 struct mem_ctl_info *mci;
2241 struct amd64_pvt *pvt;
f192c7b1 2242 u8 ecc_type = (m->status >> 45) & 0x3;
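	/*
	 * Editor's note: MCi_STATUS bit 45 is the UECC flag and bit 46 the
	 * CECC flag, hence ecc_type 2 == corrected, 1 == uncorrected and
	 * 3 == deferred (cf. __log_ecc_error() above).
	 */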
66fed2d4
BP
2243 u8 xec = XEC(m->status, 0x1f);
2244 u16 ec = EC(m->status);
33ca0643
BP
2245 u64 sys_addr;
2246 struct err_info err;
d27bf6fa 2247
0c510cc8
DB
2248 mci = edac_mc_find(node_id);
2249 if (!mci)
2250 return;
2251
2252 pvt = mci->pvt_info;
2253
66fed2d4 2254 /* Bail out early if this was an 'observed' error */
5980bb9c 2255 if (PP(ec) == NBSL_PP_OBS)
b70ef010 2256 return;
d27bf6fa 2257
ecaf5606
BP
2258 /* Do only ECC errors */
2259 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
d27bf6fa 2260 return;
d27bf6fa 2261
33ca0643
BP
2262 memset(&err, 0, sizeof(err));
2263
a4b4bedc 2264 sys_addr = get_error_address(pvt, m);
33ca0643 2265
ecaf5606 2266 if (ecc_type == 2)
33ca0643
BP
2267 err.syndrome = extract_syndrome(m->status);
2268
2269 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2270
e70984d9 2271 __log_ecc_error(mci, &err, ecc_type);
d27bf6fa
DT
2272}
2273
0ec449ee 2274/*
3f37a36b
BP
2275 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 2276 * F1 (AddrMap) and F2 (Dct) devices. Return a negative value on error.
936fc3af 2277 * Reserve F0 and F6 on systems with a UMC.
0ec449ee 2278 */
936fc3af
YG
2279static int
2280reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2281{
2282 if (pvt->umc) {
2283 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2284 if (!pvt->F0) {
2285 amd64_err("error F0 device not found: vendor %x device 0x%x (broken BIOS?)\n",
2286 PCI_VENDOR_ID_AMD, pci_id1);
2287 return -ENODEV;
2288 }
2289
2290 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2291 if (!pvt->F6) {
2292 pci_dev_put(pvt->F0);
2293 pvt->F0 = NULL;
2294
2295 amd64_err("error F6 device not found: vendor %x device 0x%x (broken BIOS?)\n",
2296 PCI_VENDOR_ID_AMD, pci_id2);
2297
2298 return -ENODEV;
2299 }
2300 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2301 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2302 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2303
2304 return 0;
2305 }
2306
0ec449ee 2307 /* Reserve the ADDRESS MAP Device */
936fc3af 2308 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
8d5b5d9c 2309 if (!pvt->F1) {
936fc3af
YG
2310 amd64_err("error address map device not found: vendor %x device 0x%x (broken BIOS?)\n",
2311 PCI_VENDOR_ID_AMD, pci_id1);
bbd0c1f6 2312 return -ENODEV;
0ec449ee
DT
2313 }
2314
3f37a36b 2315 /* Reserve the DCT Device */
936fc3af 2316 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3f37a36b 2317 if (!pvt->F2) {
8d5b5d9c
BP
2318 pci_dev_put(pvt->F1);
2319 pvt->F1 = NULL;
0ec449ee 2320
936fc3af
YG
2321 amd64_err("error F2 device not found: vendor %x device 0x%x (broken BIOS?)\n",
2322 PCI_VENDOR_ID_AMD, pci_id2);
2323 return -ENODEV;
0ec449ee 2324 }
936fc3af 2325
956b9ba1
JP
2326 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2327 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2328 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
0ec449ee
DT
2329
2330 return 0;
2331}
2332
360b7f3c 2333static void free_mc_sibling_devs(struct amd64_pvt *pvt)
0ec449ee 2334{
936fc3af
YG
2335 if (pvt->umc) {
2336 pci_dev_put(pvt->F0);
2337 pci_dev_put(pvt->F6);
2338 } else {
2339 pci_dev_put(pvt->F1);
2340 pci_dev_put(pvt->F2);
2341 }
0ec449ee
DT
2342}
2343
2344/*
2345 * Retrieve the hardware registers of the memory controller (this includes the
2346 * 'Address Map' and 'Misc' device regs)
2347 */
360b7f3c 2348static void read_mc_regs(struct amd64_pvt *pvt)
0ec449ee 2349{
a4b4bedc 2350 unsigned range;
0ec449ee 2351 u64 msr_val;
ad6a32e9 2352 u32 tmp;
0ec449ee
DT
2353
2354 /*
2355 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2356 * those are Read-As-Zero
2357 */
e97f8bb8 2358 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
956b9ba1 2359 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
0ec449ee
DT
2360
2361 /* check first whether TOP_MEM2 is enabled */
2362 rdmsrl(MSR_K8_SYSCFG, msr_val);
2363 if (msr_val & (1U << 21)) {
e97f8bb8 2364 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
956b9ba1 2365 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
0ec449ee 2366 } else
956b9ba1 2367 edac_dbg(0, " TOP_MEM2 disabled\n");
0ec449ee 2368
5980bb9c 2369 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
0ec449ee 2370
5a5d2371 2371 read_dram_ctl_register(pvt);
0ec449ee 2372
7f19bf75
BP
2373 for (range = 0; range < DRAM_RANGES; range++) {
2374 u8 rw;
0ec449ee 2375
7f19bf75
BP
2376 /* read settings for this DRAM range */
2377 read_dram_base_limit_regs(pvt, range);
2378
2379 rw = dram_rw(pvt, range);
2380 if (!rw)
2381 continue;
2382
956b9ba1
JP
2383 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2384 range,
2385 get_dram_base(pvt, range),
2386 get_dram_limit(pvt, range));
7f19bf75 2387
956b9ba1
JP
2388 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2389 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2390 (rw & 0x1) ? "R" : "-",
2391 (rw & 0x2) ? "W" : "-",
2392 dram_intlv_sel(pvt, range),
2393 dram_dst_node(pvt, range));
0ec449ee
DT
2394 }
2395
b2b0c605 2396 read_dct_base_mask(pvt);
0ec449ee 2397
bc21fa57 2398 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
7981a28f 2399 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
0ec449ee 2400
8d5b5d9c 2401 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
0ec449ee 2402
7981a28f
AG
2403 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2404 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
0ec449ee 2405
78da121e 2406 if (!dct_ganging_enabled(pvt)) {
7981a28f
AG
2407 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2408 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
0ec449ee 2409 }
ad6a32e9 2410
a3b7db09 2411 pvt->ecc_sym_sz = 4;
a597d2a5
AG
2412 determine_memory_type(pvt);
2413 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
a3b7db09 2414
a4b4bedc 2415 if (pvt->fam >= 0x10) {
b2b0c605 2416 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
7981a28f 2417 /* F16h has only DCT0, so no need to read dbam1 */
a4b4bedc 2418 if (pvt->fam != 0x16)
7981a28f 2419 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
ad6a32e9 2420
a3b7db09 2421 /* F10h, revD and later can do x8 ECC too */
a4b4bedc 2422 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
a3b7db09
BP
2423 pvt->ecc_sym_sz = 8;
2424 }
b2b0c605 2425 dump_misc_regs(pvt);
0ec449ee
DT
2426}
2427
2428/*
2429 * NOTE: CPU Revision Dependent code
2430 *
2431 * Input:
11c75ead 2432 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
0ec449ee
DT
2433 * k8 private pointer to -->
2434 * DRAM Bank Address mapping register
2435 * node_id
2436 * DCL register where dual_channel_active is
2437 *
 2438 * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
2439 *
2440 * Bits: CSROWs
2441 * 0-3 CSROWs 0 and 1
2442 * 4-7 CSROWs 2 and 3
2443 * 8-11 CSROWs 4 and 5
2444 * 12-15 CSROWs 6 and 7
2445 *
2446 * Values range from: 0 to 15
2447 * The meaning of the values depends on CPU revision and dual-channel state,
 2448 * see the relevant BKDG for more info.
2449 *
 2450 * The memory controller provides a total of only 8 CSROWs in its current
2451 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2452 * single channel or two (2) DIMMs in dual channel mode.
2453 *
2454 * The following code logic collapses the various tables for CSROW based on CPU
2455 * revision.
2456 *
2457 * Returns:
2458 * The number of PAGE_SIZE pages on the specified CSROW number it
2459 * encompasses
2460 *
2461 */
d1ea71cd 2462static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
0ec449ee 2463{
1433eb99 2464 u32 cs_mode, nr_pages;
f92cae45 2465 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
0ec449ee 2466
10de6497 2467
0ec449ee
DT
2468 /*
2469 * The math on this doesn't look right on the surface because x/2*4 can
2470 * be simplified to x*2 but this expression makes use of the fact that
2471 * it is integral math where 1/2=0. This intermediate value becomes the
2472 * number of bits to shift the DBAM register to extract the proper CSROW
2473 * field.
2474 */
0a5dfc31 2475 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
0ec449ee 2476
a597d2a5
AG
2477 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
2478 << (20 - PAGE_SHIFT);
0ec449ee 2479
10de6497
BP
2480 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2481 csrow_nr, dct, cs_mode);
2482 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
0ec449ee
DT
2483
2484 return nr_pages;
2485}
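/*
 * Editor's sketch (illustrative, helper name made up): the cs_mode lookup
 * above relies on DBAM_DIMM() pulling nibble (csrow_nr / 2) out of DBAM,
 * i.e. csrows 4 and 5 both map to DBAM bits [11:8]:
 */
static inline u32 dbam_field_for_csrow(u32 dbam, int csrow_nr)
{
	return (dbam >> ((csrow_nr / 2) * 4)) & 0xf;
}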
2486
2487/*
2488 * Initialize the array of csrow attribute instances, based on the values
 2489 * from the PCI config hardware registers.
2490 */
360b7f3c 2491static int init_csrows(struct mem_ctl_info *mci)
0ec449ee 2492{
10de6497 2493 struct amd64_pvt *pvt = mci->pvt_info;
0ec449ee 2494 struct csrow_info *csrow;
de3910eb 2495 struct dimm_info *dimm;
084a4fcc 2496 enum edac_type edac_mode;
10de6497 2497 int i, j, empty = 1;
a895bf8b 2498 int nr_pages = 0;
10de6497 2499 u32 val;
0ec449ee 2500
a97fa68e 2501 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
0ec449ee 2502
2299ef71 2503 pvt->nbcfg = val;
0ec449ee 2504
956b9ba1
JP
2505 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2506 pvt->mc_node_id, val,
2507 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
0ec449ee 2508
10de6497
BP
2509 /*
2510 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2511 */
11c75ead 2512 for_each_chip_select(i, 0, pvt) {
10de6497
BP
2513 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2514 bool row_dct1 = false;
0ec449ee 2515
a4b4bedc 2516 if (pvt->fam != 0xf)
10de6497
BP
2517 row_dct1 = !!csrow_enabled(i, 1, pvt);
2518
2519 if (!row_dct0 && !row_dct1)
0ec449ee 2520 continue;
0ec449ee 2521
10de6497 2522 csrow = mci->csrows[i];
0ec449ee 2523 empty = 0;
10de6497
BP
2524
2525 edac_dbg(1, "MC node: %d, csrow: %d\n",
2526 pvt->mc_node_id, i);
2527
1eef1282 2528 if (row_dct0) {
d1ea71cd 2529 nr_pages = get_csrow_nr_pages(pvt, 0, i);
1eef1282
MCC
2530 csrow->channels[0]->dimm->nr_pages = nr_pages;
2531 }
11c75ead 2532
10de6497 2533 /* K8 has only one DCT */
a4b4bedc 2534 if (pvt->fam != 0xf && row_dct1) {
d1ea71cd 2535 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
1eef1282
MCC
2536
2537 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2538 nr_pages += row_dct1_pages;
2539 }
0ec449ee 2540
10de6497 2541 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
0ec449ee
DT
2542
2543 /*
2544 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2545 */
a97fa68e 2546 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
084a4fcc
MCC
2547 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2548 EDAC_S4ECD4ED : EDAC_SECDED;
0ec449ee 2549 else
084a4fcc
MCC
2550 edac_mode = EDAC_NONE;
2551
2552 for (j = 0; j < pvt->channel_count; j++) {
de3910eb 2553 dimm = csrow->channels[j]->dimm;
a597d2a5 2554 dimm->mtype = pvt->dram_type;
de3910eb 2555 dimm->edac_mode = edac_mode;
084a4fcc 2556 }
0ec449ee
DT
2557 }
2558
2559 return empty;
2560}
d27bf6fa 2561
f6d6ae96 2562/* get all cores on this DCT */
8b84c8df 2563static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
f6d6ae96
BP
2564{
2565 int cpu;
2566
2567 for_each_online_cpu(cpu)
2568 if (amd_get_nb_id(cpu) == nid)
2569 cpumask_set_cpu(cpu, mask);
2570}
2571
2572/* check MCG_CTL on all the cpus on this node */
d1ea71cd 2573static bool nb_mce_bank_enabled_on_node(u16 nid)
f6d6ae96
BP
2574{
2575 cpumask_var_t mask;
50542251 2576 int cpu, nbe;
f6d6ae96
BP
2577 bool ret = false;
2578
2579 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
24f9a7fe 2580 amd64_warn("%s: Error allocating mask\n", __func__);
f6d6ae96
BP
2581 return false;
2582 }
2583
2584 get_cpus_on_this_dct_cpumask(mask, nid);
2585
f6d6ae96
BP
2586 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2587
2588 for_each_cpu(cpu, mask) {
50542251 2589 struct msr *reg = per_cpu_ptr(msrs, cpu);
5980bb9c 2590 nbe = reg->l & MSR_MCGCTL_NBE;
f6d6ae96 2591
956b9ba1
JP
2592 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2593 cpu, reg->q,
2594 (nbe ? "enabled" : "disabled"));
f6d6ae96
BP
2595
2596 if (!nbe)
2597 goto out;
f6d6ae96
BP
2598 }
2599 ret = true;
2600
2601out:
f6d6ae96
BP
2602 free_cpumask_var(mask);
2603 return ret;
2604}
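/*
 * Editor's note: MSR_MCGCTL_NBE is bit 4 of MSR_IA32_MCG_CTL -- the same
 * bit the "set MSR 0x%08x[4]" hint printed by ecc_enabled() below refers
 * to.
 */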
2605
c7e5301a 2606static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
f6d6ae96
BP
2607{
2608 cpumask_var_t cmask;
50542251 2609 int cpu;
f6d6ae96
BP
2610
2611 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
24f9a7fe 2612 amd64_warn("%s: error allocating mask\n", __func__);
f6d6ae96
BP
2613 return false;
2614 }
2615
ae7bb7c6 2616 get_cpus_on_this_dct_cpumask(cmask, nid);
f6d6ae96 2617
f6d6ae96
BP
2618 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2619
2620 for_each_cpu(cpu, cmask) {
2621
50542251
BP
2622 struct msr *reg = per_cpu_ptr(msrs, cpu);
2623
f6d6ae96 2624 if (on) {
5980bb9c 2625 if (reg->l & MSR_MCGCTL_NBE)
ae7bb7c6 2626 s->flags.nb_mce_enable = 1;
f6d6ae96 2627
5980bb9c 2628 reg->l |= MSR_MCGCTL_NBE;
f6d6ae96
BP
2629 } else {
2630 /*
d95cf4de 2631 * Turn off NB MCE reporting only when it was off before
f6d6ae96 2632 */
ae7bb7c6 2633 if (!s->flags.nb_mce_enable)
5980bb9c 2634 reg->l &= ~MSR_MCGCTL_NBE;
f6d6ae96 2635 }
f6d6ae96
BP
2636 }
2637 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2638
f6d6ae96
BP
2639 free_cpumask_var(cmask);
2640
2641 return 0;
2642}
2643
c7e5301a 2644static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2299ef71 2645 struct pci_dev *F3)
f9431992 2646{
2299ef71 2647 bool ret = true;
c9f4f26e 2648 u32 value, mask = 0x3; /* UECC/CECC enable */
f9431992 2649
2299ef71
BP
2650 if (toggle_ecc_err_reporting(s, nid, ON)) {
2651 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2652 return false;
2653 }
2654
c9f4f26e 2655 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2656
ae7bb7c6
BP
2657 s->old_nbctl = value & mask;
2658 s->nbctl_valid = true;
f9431992
DT
2659
2660 value |= mask;
c9f4f26e 2661 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2662
a97fa68e 2663 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2664
956b9ba1
JP
2665 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2666 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2667
a97fa68e 2668 if (!(value & NBCFG_ECC_ENABLE)) {
24f9a7fe 2669 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
f9431992 2670
ae7bb7c6 2671 s->flags.nb_ecc_prev = 0;
d95cf4de 2672
f9431992 2673 /* Attempt to turn on DRAM ECC Enable */
a97fa68e
BP
2674 value |= NBCFG_ECC_ENABLE;
2675 amd64_write_pci_cfg(F3, NBCFG, value);
f9431992 2676
a97fa68e 2677 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2678
a97fa68e 2679 if (!(value & NBCFG_ECC_ENABLE)) {
24f9a7fe
BP
 2680 amd64_warn("Hardware rejected DRAM ECC enable, "
 2681 "check memory DIMM configuration.\n");
2299ef71 2682 ret = false;
f9431992 2683 } else {
24f9a7fe 2684 amd64_info("Hardware accepted DRAM ECC Enable\n");
f9431992 2685 }
d95cf4de 2686 } else {
ae7bb7c6 2687 s->flags.nb_ecc_prev = 1;
f9431992 2688 }
d95cf4de 2689
956b9ba1
JP
2690 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2691 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2692
2299ef71 2693 return ret;
f9431992
DT
2694}
2695
c7e5301a 2696static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
360b7f3c 2697 struct pci_dev *F3)
f9431992 2698{
c9f4f26e
BP
2699 u32 value, mask = 0x3; /* UECC/CECC enable */
2700
ae7bb7c6 2701 if (!s->nbctl_valid)
f9431992
DT
2702 return;
2703
c9f4f26e 2704 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2705 value &= ~mask;
ae7bb7c6 2706 value |= s->old_nbctl;
f9431992 2707
c9f4f26e 2708 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2709
ae7bb7c6
BP
2710 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2711 if (!s->flags.nb_ecc_prev) {
a97fa68e
BP
2712 amd64_read_pci_cfg(F3, NBCFG, &value);
2713 value &= ~NBCFG_ECC_ENABLE;
2714 amd64_write_pci_cfg(F3, NBCFG, value);
d95cf4de
BP
2715 }
2716
2717 /* restore the NB Enable MCGCTL bit */
2299ef71 2718 if (toggle_ecc_err_reporting(s, nid, OFF))
24f9a7fe 2719 amd64_warn("Error restoring NB MCGCTL settings!\n");
f9431992
DT
2720}
2721
2722/*
2299ef71
BP
2723 * EDAC requires that the BIOS have ECC enabled before
2724 * taking over the processing of ECC errors. A command line
 2725 * option allows forcing hardware ECC on later in
2726 * enable_ecc_error_reporting().
f9431992 2727 */
cab4d277
BP
2728static const char *ecc_msg =
2729 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2730 " Either enable ECC checking or force module loading by setting "
2731 "'ecc_enable_override'.\n"
2732 " (Note that use of the override may cause unknown side effects.)\n";
be3468e8 2733
c7e5301a 2734static bool ecc_enabled(struct pci_dev *F3, u16 nid)
f9431992 2735{
06724535 2736 bool nb_mce_en = false;
196b79fc
YG
2737 u8 ecc_en = 0, i;
2738 u32 value;
f9431992 2739
196b79fc
YG
2740 if (boot_cpu_data.x86 >= 0x17) {
2741 u8 umc_en_mask = 0, ecc_en_mask = 0;
f9431992 2742
196b79fc
YG
2743 for (i = 0; i < NUM_UMCS; i++) {
2744 u32 base = get_umc_base(i);
2745
2746 /* Only check enabled UMCs. */
2747 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
2748 continue;
2749
2750 if (!(value & UMC_SDP_INIT))
2751 continue;
2752
2753 umc_en_mask |= BIT(i);
2754
2755 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
2756 continue;
2757
2758 if (value & UMC_ECC_ENABLED)
2759 ecc_en_mask |= BIT(i);
2760 }
2761
2762 /* Check whether at least one UMC is enabled: */
2763 if (umc_en_mask)
2764 ecc_en = umc_en_mask == ecc_en_mask;
2765
2766 /* Assume UMC MCA banks are enabled. */
2767 nb_mce_en = true;
2768 } else {
2769 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2770
196b79fc
YG
2771 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2772
2773 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2774 if (!nb_mce_en)
2775 amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
2776 MSR_IA32_MCG_CTL, nid);
2777 }
2778
2779 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
f9431992 2780
2299ef71
BP
2781 if (!ecc_en || !nb_mce_en) {
2782 amd64_notice("%s", ecc_msg);
2783 return false;
2784 }
2785 return true;
f9431992
DT
2786}
2787
df71a053
BP
2788static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2789 struct amd64_family_type *fam)
7d6034d3
DT
2790{
2791 struct amd64_pvt *pvt = mci->pvt_info;
2792
2793 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2794 mci->edac_ctl_cap = EDAC_FLAG_NONE;
7d6034d3 2795
5980bb9c 2796 if (pvt->nbcap & NBCAP_SECDED)
7d6034d3
DT
2797 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2798
5980bb9c 2799 if (pvt->nbcap & NBCAP_CHIPKILL)
7d6034d3
DT
2800 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2801
d1ea71cd 2802 mci->edac_cap = determine_edac_cap(pvt);
7d6034d3
DT
2803 mci->mod_name = EDAC_MOD_STR;
2804 mci->mod_ver = EDAC_AMD64_VERSION;
df71a053 2805 mci->ctl_name = fam->ctl_name;
e7934b70 2806 mci->dev_name = pci_name(pvt->F3);
7d6034d3
DT
2807 mci->ctl_page_to_phys = NULL;
2808
7d6034d3 2809 /* memory scrubber interface */
d1ea71cd
BP
2810 mci->set_sdram_scrub_rate = set_scrub_rate;
2811 mci->get_sdram_scrub_rate = get_scrub_rate;
7d6034d3
DT
2812}
2813
0092b20d
BP
2814/*
2815 * returns a pointer to the family descriptor on success, NULL otherwise.
2816 */
d1ea71cd 2817static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
395ae783 2818{
0092b20d
BP
2819 struct amd64_family_type *fam_type = NULL;
2820
18b94f66 2821 pvt->ext_model = boot_cpu_data.x86_model >> 4;
a4b4bedc 2822 pvt->stepping = boot_cpu_data.x86_mask;
18b94f66
AG
2823 pvt->model = boot_cpu_data.x86_model;
2824 pvt->fam = boot_cpu_data.x86;
2825
2826 switch (pvt->fam) {
395ae783 2827 case 0xf:
d1ea71cd
BP
2828 fam_type = &family_types[K8_CPUS];
2829 pvt->ops = &family_types[K8_CPUS].ops;
395ae783 2830 break;
df71a053 2831
395ae783 2832 case 0x10:
d1ea71cd
BP
2833 fam_type = &family_types[F10_CPUS];
2834 pvt->ops = &family_types[F10_CPUS].ops;
df71a053
BP
2835 break;
2836
2837 case 0x15:
18b94f66 2838 if (pvt->model == 0x30) {
d1ea71cd
BP
2839 fam_type = &family_types[F15_M30H_CPUS];
2840 pvt->ops = &family_types[F15_M30H_CPUS].ops;
18b94f66 2841 break;
a597d2a5
AG
2842 } else if (pvt->model == 0x60) {
2843 fam_type = &family_types[F15_M60H_CPUS];
2844 pvt->ops = &family_types[F15_M60H_CPUS].ops;
2845 break;
18b94f66
AG
2846 }
2847
d1ea71cd
BP
2848 fam_type = &family_types[F15_CPUS];
2849 pvt->ops = &family_types[F15_CPUS].ops;
395ae783
BP
2850 break;
2851
94c1acf2 2852 case 0x16:
85a8885b
AG
2853 if (pvt->model == 0x30) {
2854 fam_type = &family_types[F16_M30H_CPUS];
2855 pvt->ops = &family_types[F16_M30H_CPUS].ops;
2856 break;
2857 }
d1ea71cd
BP
2858 fam_type = &family_types[F16_CPUS];
2859 pvt->ops = &family_types[F16_CPUS].ops;
94c1acf2
AG
2860 break;
2861
f1cbbec9
YG
2862 case 0x17:
2863 fam_type = &family_types[F17_CPUS];
2864 pvt->ops = &family_types[F17_CPUS].ops;
2865 break;
2866
395ae783 2867 default:
24f9a7fe 2868 amd64_err("Unsupported family!\n");
0092b20d 2869 return NULL;
395ae783 2870 }
0092b20d 2871
df71a053 2872 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
18b94f66 2873 (pvt->fam == 0xf ?
24f9a7fe
BP
2874 (pvt->ext_model >= K8_REV_F ? "revF or later "
2875 : "revE or earlier ")
2876 : ""), pvt->mc_node_id);
0092b20d 2877 return fam_type;
395ae783
BP
2878}
2879
e339f1ec
TI
2880static const struct attribute_group *amd64_edac_attr_groups[] = {
2881#ifdef CONFIG_EDAC_DEBUG
2882 &amd64_edac_dbg_group,
2883#endif
2884#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
2885 &amd64_edac_inj_group,
2886#endif
2887 NULL
2888};
2889
3f37a36b 2890static int init_one_instance(unsigned int nid)
7d6034d3 2891{
3f37a36b 2892 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
0092b20d 2893 struct amd64_family_type *fam_type = NULL;
360b7f3c 2894 struct mem_ctl_info *mci = NULL;
ab5a503c 2895 struct edac_mc_layer layers[2];
3f37a36b 2896 struct amd64_pvt *pvt = NULL;
936fc3af 2897 u16 pci_id1, pci_id2;
7d6034d3
DT
2898 int err = 0, ret;
2899
2900 ret = -ENOMEM;
2901 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2902 if (!pvt)
360b7f3c 2903 goto err_ret;
7d6034d3 2904
360b7f3c 2905 pvt->mc_node_id = nid;
3f37a36b 2906 pvt->F3 = F3;
7d6034d3 2907
395ae783 2908 ret = -EINVAL;
d1ea71cd 2909 fam_type = per_family_init(pvt);
0092b20d 2910 if (!fam_type)
395ae783
BP
2911 goto err_free;
2912
936fc3af
YG
2913 if (pvt->fam >= 0x17) {
2914 pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
2915 if (!pvt->umc) {
2916 ret = -ENOMEM;
2917 goto err_free;
2918 }
2919
2920 pci_id1 = fam_type->f0_id;
2921 pci_id2 = fam_type->f6_id;
2922 } else {
2923 pci_id1 = fam_type->f1_id;
2924 pci_id2 = fam_type->f2_id;
2925 }
2926
2927 err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
7d6034d3 2928 if (err)
936fc3af 2929 goto err_post_init;
7d6034d3 2930
360b7f3c 2931 read_mc_regs(pvt);
7d6034d3 2932
7d6034d3
DT
2933 /*
2934 * We need to determine how many memory channels there are. Then use
2935 * that information for calculating the size of the dynamic instance
360b7f3c 2936 * tables in the 'mci' structure.
7d6034d3 2937 */
360b7f3c 2938 ret = -EINVAL;
7d6034d3
DT
2939 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2940 if (pvt->channel_count < 0)
360b7f3c 2941 goto err_siblings;
7d6034d3
DT
2942
2943 ret = -ENOMEM;
ab5a503c
MCC
2944 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2945 layers[0].size = pvt->csels[0].b_cnt;
2946 layers[0].is_virt_csrow = true;
2947 layers[1].type = EDAC_MC_LAYER_CHANNEL;
f0a56c48
BP
2948
2949 /*
2950 * Always allocate two channels since we can have setups with DIMMs on
2951 * only one channel. Also, this simplifies handling later for the price
2952 * of a couple of KBs tops.
2953 */
2954 layers[1].size = 2;
ab5a503c 2955 layers[1].is_virt_csrow = false;
f0a56c48 2956
ca0907b9 2957 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
7d6034d3 2958 if (!mci)
360b7f3c 2959 goto err_siblings;
7d6034d3
DT
2960
2961 mci->pvt_info = pvt;
3f37a36b 2962 mci->pdev = &pvt->F3->dev;
7d6034d3 2963
df71a053 2964 setup_mci_misc_attrs(mci, fam_type);
360b7f3c
BP
2965
2966 if (init_csrows(mci))
7d6034d3
DT
2967 mci->edac_cap = EDAC_FLAG_NONE;
2968
7d6034d3 2969 ret = -ENODEV;
e339f1ec 2970 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
956b9ba1 2971 edac_dbg(1, "failed edac_mc_add_mc()\n");
7d6034d3
DT
2972 goto err_add_mc;
2973 }
2974
549d042d
BP
2975 /* register stuff with EDAC MCE */
2976 if (report_gart_errors)
2977 amd_report_gart_errors(true);
2978
df781d03 2979 amd_register_ecc_decoder(decode_bus_error);
549d042d 2980
7d6034d3
DT
2981 return 0;
2982
2983err_add_mc:
2984 edac_mc_free(mci);
2985
360b7f3c
BP
2986err_siblings:
2987 free_mc_sibling_devs(pvt);
7d6034d3 2988
936fc3af
YG
2989err_post_init:
2990 if (pvt->fam >= 0x17)
2991 kfree(pvt->umc);
2992
360b7f3c
BP
2993err_free:
2994 kfree(pvt);
7d6034d3 2995
360b7f3c 2996err_ret:
7d6034d3
DT
2997 return ret;
2998}
2999
3f37a36b 3000static int probe_one_instance(unsigned int nid)
7d6034d3 3001{
2299ef71 3002 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
ae7bb7c6 3003 struct ecc_settings *s;
3f37a36b 3004 int ret;
7d6034d3 3005
ae7bb7c6
BP
3006 ret = -ENOMEM;
3007 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3008 if (!s)
2299ef71 3009 goto err_out;
ae7bb7c6
BP
3010
3011 ecc_stngs[nid] = s;
3012
2299ef71
BP
3013 if (!ecc_enabled(F3, nid)) {
3014 ret = -ENODEV;
3015
3016 if (!ecc_enable_override)
3017 goto err_enable;
3018
044e7a41
YG
3019 if (boot_cpu_data.x86 >= 0x17) {
 3020 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
3021 goto err_enable;
3022 } else
3023 amd64_warn("Forcing ECC on!\n");
2299ef71
BP
3024
3025 if (!enable_ecc_error_reporting(s, nid, F3))
3026 goto err_enable;
3027 }
3028
3f37a36b 3029 ret = init_one_instance(nid);
360b7f3c 3030 if (ret < 0) {
ae7bb7c6 3031 amd64_err("Error probing instance: %d\n", nid);
044e7a41
YG
3032
3033 if (boot_cpu_data.x86 < 0x17)
3034 restore_ecc_error_reporting(s, nid, F3);
360b7f3c 3035 }
7d6034d3
DT
3036
3037 return ret;
2299ef71
BP
3038
3039err_enable:
3040 kfree(s);
3041 ecc_stngs[nid] = NULL;
3042
3043err_out:
3044 return ret;
7d6034d3
DT
3045}
3046
3f37a36b 3047static void remove_one_instance(unsigned int nid)
7d6034d3 3048{
360b7f3c
BP
3049 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3050 struct ecc_settings *s = ecc_stngs[nid];
3f37a36b
BP
3051 struct mem_ctl_info *mci;
3052 struct amd64_pvt *pvt;
7d6034d3 3053
3f37a36b 3054 mci = find_mci_by_dev(&F3->dev);
a4b4bedc
BP
3055 WARN_ON(!mci);
3056
7d6034d3 3057 /* Remove from EDAC CORE tracking list */
3f37a36b 3058 mci = edac_mc_del_mc(&F3->dev);
7d6034d3
DT
3059 if (!mci)
3060 return;
3061
3062 pvt = mci->pvt_info;
3063
360b7f3c 3064 restore_ecc_error_reporting(s, nid, F3);
7d6034d3 3065
360b7f3c 3066 free_mc_sibling_devs(pvt);
7d6034d3 3067
549d042d
BP
3068 /* unregister from EDAC MCE */
3069 amd_report_gart_errors(false);
df781d03 3070 amd_unregister_ecc_decoder(decode_bus_error);
549d042d 3071
360b7f3c
BP
3072 kfree(ecc_stngs[nid]);
3073 ecc_stngs[nid] = NULL;
ae7bb7c6 3074
7d6034d3 3075 /* Free the EDAC CORE resources */
8f68ed97 3076 mci->pvt_info = NULL;
8f68ed97
BP
3077
3078 kfree(pvt);
7d6034d3
DT
3079 edac_mc_free(mci);
3080}
3081
360b7f3c 3082static void setup_pci_device(void)
7d6034d3
DT
3083{
3084 struct mem_ctl_info *mci;
3085 struct amd64_pvt *pvt;
3086
d1ea71cd 3087 if (pci_ctl)
7d6034d3
DT
3088 return;
3089
2ec591ac 3090 mci = edac_mc_find(0);
d1ea71cd
BP
3091 if (!mci)
3092 return;
7d6034d3 3093
d1ea71cd 3094 pvt = mci->pvt_info;
936fc3af
YG
3095 if (pvt->umc)
3096 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3097 else
3098 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
d1ea71cd
BP
3099 if (!pci_ctl) {
3100 pr_warn("%s(): Unable to create PCI control\n", __func__);
3101 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
7d6034d3
DT
3102 }
3103}
3104
d6efab74
YG
3105static const struct x86_cpu_id amd64_cpuids[] = {
3106 { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3107 { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3108 { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3109 { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3110 { }
3111};
3112MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3113
7d6034d3
DT
3114static int __init amd64_edac_init(void)
3115{
360b7f3c 3116 int err = -ENODEV;
3f37a36b 3117 int i;
7d6034d3 3118
9653a5c7 3119 if (amd_cache_northbridges() < 0)
56b34b91 3120 goto err_ret;
7d6034d3 3121
6ba92fea
BP
3122 opstate_init();
3123
cc4d8860 3124 err = -ENOMEM;
ae7bb7c6 3125 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2ec591ac 3126 if (!ecc_stngs)
a9f0fbe2 3127 goto err_free;
cc4d8860 3128
50542251 3129 msrs = msrs_alloc();
56b34b91 3130 if (!msrs)
360b7f3c 3131 goto err_free;
50542251 3132
3f37a36b
BP
3133 for (i = 0; i < amd_nb_num(); i++)
3134 if (probe_one_instance(i)) {
3135 /* unwind properly */
3136 while (--i >= 0)
3137 remove_one_instance(i);
7d6034d3 3138
3f37a36b
BP
3139 goto err_pci;
3140 }
7d6034d3 3141
360b7f3c 3142 setup_pci_device();
f5b10c45
TP
3143
3144#ifdef CONFIG_X86_32
3145 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3146#endif
3147
de0336b3
BP
3148 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3149
360b7f3c 3150 return 0;
7d6034d3 3151
56b34b91
BP
3152err_pci:
3153 msrs_free(msrs);
3154 msrs = NULL;
cc4d8860 3155
360b7f3c 3156err_free:
360b7f3c
BP
3157 kfree(ecc_stngs);
3158 ecc_stngs = NULL;
3159
56b34b91 3160err_ret:
7d6034d3
DT
3161 return err;
3162}
3163
3164static void __exit amd64_edac_exit(void)
3165{
3f37a36b
BP
3166 int i;
3167
d1ea71cd
BP
3168 if (pci_ctl)
3169 edac_pci_release_generic_ctl(pci_ctl);
7d6034d3 3170
3f37a36b
BP
3171 for (i = 0; i < amd_nb_num(); i++)
3172 remove_one_instance(i);
50542251 3173
ae7bb7c6
BP
3174 kfree(ecc_stngs);
3175 ecc_stngs = NULL;
3176
50542251
BP
3177 msrs_free(msrs);
3178 msrs = NULL;
7d6034d3
DT
3179}
3180
3181module_init(amd64_edac_init);
3182module_exit(amd64_edac_exit);
3183
3184MODULE_LICENSE("GPL");
3185MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3186 "Dave Peterson, Thayne Harbaugh");
3187MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3188 EDAC_AMD64_VERSION);
3189
3190module_param(edac_op_state, int, 0444);
3191MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");