#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;	/* bit pattern for scrub rate */
	u32 bandwidth;	/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},	/* scrubbing off */
};

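/*
 * Thin wrappers around pci_{read,write}_config_dword() that, on failure, log
 * a warning naming the calling function and the PCI function/register offset.
 */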
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
	} else
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow];
		base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift = 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift = 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0 = DCSB0 + (cs * 4);
		int reg1 = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0 = DCSM0 + (cs * 4);
		int reg1 = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}

static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

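/*
 * Walk the PCI devices with the given vendor/device ID and return the one
 * sitting in the same domain/bus/slot as @related, i.e. the sibling function
 * of that northbridge device.
 */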
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

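/*
 * Map a DBAM cs_mode encoding to a DDR3 chip select size (in MB, analogous to
 * ddr2_cs_size() above); encodings 0, 3 and 4 are reserved and yield -1. The
 * size doubles when the DCT runs in 128-bit mode.
 */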
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

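/*
 * Same mapping for DDR3 LRDIMMs, scaled by the rank multiplication factor;
 * encodings 0-3 and 6 are reserved and yield -1.
 */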
static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base = get_dram_base(pvt, range);
	u64 hole_off = f10_dhar_offset(pvt);
	u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb &&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole &&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base = (swap_reg >> 3) & 0x7f;
	swap_limit = (swap_reg >> 11) & 0x7f;
	rgn_size = (swap_reg >> 20) & 0x7f;
	tmp_addr = sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	      (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

f71d0a05 1602/* For a given @dram_range, check if @sys_addr falls within it. */
e761359a 1603static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
33ca0643 1604 u64 sys_addr, int *chan_sel)
f71d0a05 1605{
229a7a11 1606 int cs_found = -EINVAL;
c8e518d5 1607 u64 chan_addr;
5d4b58e8 1608 u32 dct_sel_base;
11c75ead 1609 u8 channel;
229a7a11 1610 bool high_range = false;
f71d0a05 1611
7f19bf75 1612 u8 node_id = dram_dst_node(pvt, range);
229a7a11 1613 u8 intlv_en = dram_intlv_en(pvt, range);
7f19bf75 1614 u32 intlv_sel = dram_intlv_sel(pvt, range);
f71d0a05 1615
956b9ba1
JP
1616 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1617 range, sys_addr, get_dram_limit(pvt, range));
f71d0a05 1618
355fba60
BP
1619 if (dhar_valid(pvt) &&
1620 dhar_base(pvt) <= sys_addr &&
1621 sys_addr < BIT_64(32)) {
1622 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1623 sys_addr);
1624 return -EINVAL;
1625 }
1626
f030ddfb 1627 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
f71d0a05
DT
1628 return -EINVAL;
1629
b15f0fca 1630 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
95b0ef55 1631
f71d0a05
DT
1632 dct_sel_base = dct_sel_baseaddr(pvt);
1633
1634 /*
1635 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1636 * select between DCT0 and DCT1.
1637 */
1638 if (dct_high_range_enabled(pvt) &&
1639 !dct_ganging_enabled(pvt) &&
1640 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
229a7a11 1641 high_range = true;
f71d0a05 1642
b15f0fca 1643 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
f71d0a05 1644
b15f0fca 1645 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
c8e518d5 1646 high_range, dct_sel_base);
f71d0a05 1647
e2f79dbd
BP
1648 /* Remove node interleaving, see F1x120 */
1649 if (intlv_en)
1650 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1651 (chan_addr & 0xfff);
f71d0a05 1652
5d4b58e8 1653 /* remove channel interleave */
f71d0a05
DT
1654 if (dct_interleave_enabled(pvt) &&
1655 !dct_high_range_enabled(pvt) &&
1656 !dct_ganging_enabled(pvt)) {
5d4b58e8
BP
1657
1658 if (dct_sel_interleave_addr(pvt) != 1) {
1659 if (dct_sel_interleave_addr(pvt) == 0x3)
1660 /* hash 9 */
1661 chan_addr = ((chan_addr >> 10) << 9) |
1662 (chan_addr & 0x1ff);
1663 else
1664 /* A[6] or hash 6 */
1665 chan_addr = ((chan_addr >> 7) << 6) |
1666 (chan_addr & 0x3f);
1667 } else
1668 /* A[12] */
1669 chan_addr = ((chan_addr >> 13) << 12) |
1670 (chan_addr & 0xfff);
f71d0a05
DT
1671 }
1672
956b9ba1 1673 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
f71d0a05 1674
b15f0fca 1675 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
f71d0a05 1676
33ca0643 1677 if (cs_found >= 0)
f71d0a05 1678 *chan_sel = channel;
33ca0643 1679
f71d0a05
DT
1680 return cs_found;
1681}
1682
18b94f66
AG
1683static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1684 u64 sys_addr, int *chan_sel)
1685{
1686 int cs_found = -EINVAL;
1687 int num_dcts_intlv = 0;
1688 u64 chan_addr, chan_offset;
1689 u64 dct_base, dct_limit;
1690 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1691 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1692
1693 u64 dhar_offset = f10_dhar_offset(pvt);
1694 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1695 u8 node_id = dram_dst_node(pvt, range);
1696 u8 intlv_en = dram_intlv_en(pvt, range);
1697
1698 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1699 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1700
1701 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1702 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1703
1704 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1705 range, sys_addr, get_dram_limit(pvt, range));
1706
1707 if (!(get_dram_base(pvt, range) <= sys_addr) &&
1708 !(get_dram_limit(pvt, range) >= sys_addr))
1709 return -EINVAL;
1710
1711 if (dhar_valid(pvt) &&
1712 dhar_base(pvt) <= sys_addr &&
1713 sys_addr < BIT_64(32)) {
1714 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1715 sys_addr);
1716 return -EINVAL;
1717 }
1718
1719 /* Verify sys_addr is within DCT Range. */
4fc06b31
AG
1720 dct_base = (u64) dct_sel_baseaddr(pvt);
1721 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
18b94f66
AG
1722
1723 if (!(dct_cont_base_reg & BIT(0)) &&
4fc06b31
AG
1724 !(dct_base <= (sys_addr >> 27) &&
1725 dct_limit >= (sys_addr >> 27)))
18b94f66
AG
1726 return -EINVAL;
1727
1728 /* Verify number of dct's that participate in channel interleaving. */
1729 num_dcts_intlv = (int) hweight8(intlv_en);
1730
1731 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1732 return -EINVAL;
1733
1734 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1735 num_dcts_intlv, dct_sel);
1736
1737 /* Verify we stay within the MAX number of channels allowed */
7f3f5240 1738 if (channel > 3)
18b94f66
AG
1739 return -EINVAL;
1740
1741 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1742
1743 /* Get normalized DCT addr */
1744 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1745 chan_offset = dhar_offset;
1746 else
4fc06b31 1747 chan_offset = dct_base << 27;
18b94f66
AG
1748
1749 chan_addr = sys_addr - chan_offset;
1750
1751 /* remove channel interleave */
1752 if (num_dcts_intlv == 2) {
1753 if (intlv_addr == 0x4)
1754 chan_addr = ((chan_addr >> 9) << 8) |
1755 (chan_addr & 0xff);
1756 else if (intlv_addr == 0x5)
1757 chan_addr = ((chan_addr >> 10) << 9) |
1758 (chan_addr & 0x1ff);
1759 else
1760 return -EINVAL;
1761
1762 } else if (num_dcts_intlv == 4) {
1763 if (intlv_addr == 0x4)
1764 chan_addr = ((chan_addr >> 10) << 8) |
1765 (chan_addr & 0xff);
1766 else if (intlv_addr == 0x5)
1767 chan_addr = ((chan_addr >> 11) << 9) |
1768 (chan_addr & 0x1ff);
1769 else
1770 return -EINVAL;
1771 }
1772
1773 if (dct_offset_en) {
1774 amd64_read_pci_cfg(pvt->F1,
1775 DRAM_CONT_HIGH_OFF + (int) channel * 4,
1776 &tmp);
4fc06b31 1777 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
18b94f66
AG
1778 }
1779
1780 f15h_select_dct(pvt, channel);
1781
1782 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1783
1784 /*
1785 * Find Chip select:
 1786 * If channel == 3, alias it to 1. This is because F15 M30h supports
 1787 * four DCTs, but only two of them, DCT0 and DCT3, are currently
 1788 * functional, and all of DCT3's registers were read into pvt->csels[1].
 1789 * So use '1' here to get the correct info.
 1790 * Refer to the F15 M30h BKDG, Sections 2.10 and 2.10.3, for clarification.
1791 */
1792 alias_channel = (channel == 3) ? 1 : channel;
1793
1794 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1795
1796 if (cs_found >= 0)
1797 *chan_sel = alias_channel;
1798
1799 return cs_found;
1800}
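/*
 * Illustrative sketch (not part of the driver): in the two-DCT interleave
 * case above with intlv_addr == 0x4, the channel-select bit sits at A[8];
 * removing it keeps A[7:0] and shifts everything above A[8] down by one:
 */
static inline u64 example_remove_2dct_intlv(u64 chan_addr)
{
	/* Squeeze bit 8 out of the address (intlv_addr == 0x4 case). */
	return ((chan_addr >> 9) << 8) | (chan_addr & 0xff);
}

/*
 * E.g. chan_addr = 0x1234: 0x1234 >> 9 = 0x9, << 8 = 0x900, OR in the low
 * 8 bits 0x34 => 0x934.
 */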
1801
1802static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1803 u64 sys_addr,
1804 int *chan_sel)
f71d0a05 1805{
e761359a
BP
1806 int cs_found = -EINVAL;
1807 unsigned range;
f71d0a05 1808
7f19bf75 1809 for (range = 0; range < DRAM_RANGES; range++) {
7f19bf75 1810 if (!dram_rw(pvt, range))
f71d0a05
DT
1811 continue;
1812
18b94f66
AG
1813 if (pvt->fam == 0x15 && pvt->model >= 0x30)
1814 cs_found = f15_m30h_match_to_this_node(pvt, range,
1815 sys_addr,
1816 chan_sel);
f71d0a05 1817
18b94f66
AG
1818 else if ((get_dram_base(pvt, range) <= sys_addr) &&
1819 (get_dram_limit(pvt, range) >= sys_addr)) {
b15f0fca 1820 cs_found = f1x_match_to_this_node(pvt, range,
33ca0643 1821 sys_addr, chan_sel);
f71d0a05
DT
1822 if (cs_found >= 0)
1823 break;
1824 }
1825 }
1826 return cs_found;
1827}
1828
1829/*
bdc30a0c
BP
1830 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1831 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
f71d0a05 1832 *
bdc30a0c
BP
1833 * The @sys_addr is usually an error address received from the hardware
1834 * (MCX_ADDR).
f71d0a05 1835 */
b15f0fca 1836static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
33ca0643 1837 struct err_info *err)
f71d0a05
DT
1838{
1839 struct amd64_pvt *pvt = mci->pvt_info;
f71d0a05 1840
33ca0643 1841 error_address_to_page_and_offset(sys_addr, err);
ab5a503c 1842
33ca0643
BP
1843 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1844 if (err->csrow < 0) {
1845 err->err_code = ERR_CSROW;
bdc30a0c
BP
1846 return;
1847 }
1848
bdc30a0c
BP
1849 /*
1850 * We need the syndromes for channel detection only when we're
 1851 * ganged. Otherwise err->channel should already contain the channel
 1852 * at this point.
1853 */
a97fa68e 1854 if (dct_ganging_enabled(pvt))
33ca0643 1855 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
f71d0a05
DT
1856}
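/*
 * Illustrative sketch (not part of the driver): error_address_to_page_and_offset()
 * presumably performs the conventional page/offset split of the error address
 * before the csrow/channel lookup above. With 4 KiB pages that split is:
 */
static inline void example_page_and_offset(u64 sys_addr, u32 *page, u32 *offset)
{
	*page	= (u32)(sys_addr >> PAGE_SHIFT);	/* e.g. 0x12345678 -> page 0x12345 */
	*offset	= (u32)(sys_addr & ~PAGE_MASK);		/* ... and offset 0x678 */
}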
1857
f71d0a05 1858/*
8566c4df 1859 * debug routine to display the memory sizes of all logical DIMMs and their
cb328507 1860 * CSROWs
f71d0a05 1861 */
d1ea71cd 1862static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
f71d0a05 1863{
bb89f5a0 1864 int dimm, size0, size1;
525a1b20
BP
1865 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1866 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
f71d0a05 1867
a4b4bedc 1868 if (pvt->fam == 0xf) {
8566c4df 1869 /* K8 revisions < revF not supported yet */
1433eb99 1870 if (pvt->ext_model < K8_REV_F)
8566c4df
BP
1871 return;
1872 else
1873 WARN_ON(ctrl != 0);
1874 }
1875
7981a28f
AG
1876 if (pvt->fam == 0x10) {
1877 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1878 : pvt->dbam0;
1879 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1880 pvt->csels[1].csbases :
1881 pvt->csels[0].csbases;
1882 } else if (ctrl) {
1883 dbam = pvt->dbam0;
1884 dcsb = pvt->csels[1].csbases;
1885 }
956b9ba1
JP
1886 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1887 ctrl, dbam);
f71d0a05 1888
8566c4df
BP
1889 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1890
f71d0a05
DT
1891 /* Dump memory sizes for DIMM and its CSROWs */
1892 for (dimm = 0; dimm < 4; dimm++) {
1893
1894 size0 = 0;
11c75ead 1895 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
a597d2a5
AG
1896 /* For f15m60h, need multiplier for LRDIMM cs_size
1897 * calculation. We pass 'dimm' value to the dbam_to_cs
1898 * mapper so we can find the multiplier from the
1899 * corresponding DCSM.
1900 */
41d8bfab 1901 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
a597d2a5
AG
1902 DBAM_DIMM(dimm, dbam),
1903 dimm);
f71d0a05
DT
1904
1905 size1 = 0;
11c75ead 1906 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
41d8bfab 1907 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
a597d2a5
AG
1908 DBAM_DIMM(dimm, dbam),
1909 dimm);
f71d0a05 1910
24f9a7fe 1911 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
bb89f5a0
BP
1912 dimm * 2, size0,
1913 dimm * 2 + 1, size1);
f71d0a05
DT
1914 }
1915}
1916
d1ea71cd 1917static struct amd64_family_type family_types[] = {
4d37607a 1918 [K8_CPUS] = {
0092b20d 1919 .ctl_name = "K8",
8d5b5d9c
BP
1920 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1921 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
4d37607a 1922 .ops = {
1433eb99 1923 .early_channel_count = k8_early_channel_count,
1433eb99
BP
1924 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1925 .dbam_to_cs = k8_dbam_to_chip_select,
4d37607a
DT
1926 }
1927 },
1928 [F10_CPUS] = {
0092b20d 1929 .ctl_name = "F10h",
8d5b5d9c
BP
1930 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1931 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
4d37607a 1932 .ops = {
7d20d14d 1933 .early_channel_count = f1x_early_channel_count,
b15f0fca 1934 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1433eb99 1935 .dbam_to_cs = f10_dbam_to_chip_select,
b2b0c605
BP
1936 }
1937 },
1938 [F15_CPUS] = {
1939 .ctl_name = "F15h",
df71a053
BP
1940 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1941 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
b2b0c605 1942 .ops = {
7d20d14d 1943 .early_channel_count = f1x_early_channel_count,
b15f0fca 1944 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
41d8bfab 1945 .dbam_to_cs = f15_dbam_to_chip_select,
4d37607a
DT
1946 }
1947 },
18b94f66
AG
1948 [F15_M30H_CPUS] = {
1949 .ctl_name = "F15h_M30h",
1950 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1951 .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1952 .ops = {
1953 .early_channel_count = f1x_early_channel_count,
1954 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1955 .dbam_to_cs = f16_dbam_to_chip_select,
18b94f66
AG
1956 }
1957 },
a597d2a5
AG
1958 [F15_M60H_CPUS] = {
1959 .ctl_name = "F15h_M60h",
1960 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
1961 .f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
1962 .ops = {
1963 .early_channel_count = f1x_early_channel_count,
1964 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1965 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
1966 }
1967 },
94c1acf2
AG
1968 [F16_CPUS] = {
1969 .ctl_name = "F16h",
1970 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1971 .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1972 .ops = {
1973 .early_channel_count = f1x_early_channel_count,
1974 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1975 .dbam_to_cs = f16_dbam_to_chip_select,
94c1acf2
AG
1976 }
1977 },
85a8885b
AG
1978 [F16_M30H_CPUS] = {
1979 .ctl_name = "F16h_M30h",
1980 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
1981 .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
1982 .ops = {
1983 .early_channel_count = f1x_early_channel_count,
1984 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1985 .dbam_to_cs = f16_dbam_to_chip_select,
85a8885b
AG
1986 }
1987 },
4d37607a
DT
1988};
1989
b1289d6f 1990/*
bfc04aec
BP
1991 * These are tables of eigenvectors (one per line) which can be used for the
1992 * construction of the syndrome tables. The modified syndrome search algorithm
 1993 * uses them to find the symbol in error and thus the failing DIMM.
b1289d6f 1994 *
bfc04aec 1995 * Algorithm courtesy of Ross LaFetra from AMD.
b1289d6f 1996 */
c7e5301a 1997static const u16 x4_vectors[] = {
bfc04aec
BP
1998 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1999 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2000 0x0001, 0x0002, 0x0004, 0x0008,
2001 0x1013, 0x3032, 0x4044, 0x8088,
2002 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2003 0x4857, 0xc4fe, 0x13cc, 0x3288,
2004 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2005 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2006 0x15c1, 0x2a42, 0x89ac, 0x4758,
2007 0x2b03, 0x1602, 0x4f0c, 0xca08,
2008 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2009 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2010 0x2b87, 0x164e, 0x642c, 0xdc18,
2011 0x40b9, 0x80de, 0x1094, 0x20e8,
2012 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2013 0x11c1, 0x2242, 0x84ac, 0x4c58,
2014 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2015 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2016 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2017 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2018 0x16b3, 0x3d62, 0x4f34, 0x8518,
2019 0x1e2f, 0x391a, 0x5cac, 0xf858,
2020 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2021 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2022 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2023 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2024 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2025 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2026 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2027 0x185d, 0x2ca6, 0x7914, 0x9e28,
2028 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2029 0x4199, 0x82ee, 0x19f4, 0x2e58,
2030 0x4807, 0xc40e, 0x130c, 0x3208,
2031 0x1905, 0x2e0a, 0x5804, 0xac08,
2032 0x213f, 0x132a, 0xadfc, 0x5ba8,
2033 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
b1289d6f
DT
2034};
2035
c7e5301a 2036static const u16 x8_vectors[] = {
bfc04aec
BP
2037 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2038 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2039 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2040 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2041 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2042 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2043 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2044 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2045 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2046 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2047 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2048 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2049 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2050 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2051 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2052 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2053 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2054 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2055 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2056};
2057
c7e5301a 2058static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
d34a6ecd 2059 unsigned v_dim)
b1289d6f 2060{
bfc04aec
BP
2061 unsigned int i, err_sym;
2062
2063 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2064 u16 s = syndrome;
d34a6ecd
BP
2065 unsigned v_idx = err_sym * v_dim;
2066 unsigned v_end = (err_sym + 1) * v_dim;
bfc04aec
BP
2067
2068 /* walk over all 16 bits of the syndrome */
2069 for (i = 1; i < (1U << 16); i <<= 1) {
2070
2071 /* if bit is set in that eigenvector... */
2072 if (v_idx < v_end && vectors[v_idx] & i) {
2073 u16 ev_comp = vectors[v_idx++];
2074
2075 /* ... and bit set in the modified syndrome, */
2076 if (s & i) {
2077 /* remove it. */
2078 s ^= ev_comp;
4d37607a 2079
bfc04aec
BP
2080 if (!s)
2081 return err_sym;
2082 }
b1289d6f 2083
bfc04aec
BP
2084 } else if (s & i)
2085 /* can't get to zero, move to next symbol */
2086 break;
2087 }
b1289d6f
DT
2088 }
2089
956b9ba1 2090 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
b1289d6f
DT
2091 return -1;
2092}
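/*
 * Illustrative worked example (not part of the driver): a syndrome that is the
 * XOR of eigenvectors belonging to a single error symbol decodes back to that
 * symbol. In the x4 table above, symbol 2 owns {0x0001, 0x0002, 0x0004, 0x0008},
 * so a syndrome of 0x0003 = 0x0001 ^ 0x0002 resolves to err_sym 2, which
 * map_err_sym_to_channel() below then maps to channel 0:
 */
static inline int example_decode_x4_syndrome(void)
{
	u16 syndrome = 0x0003;	/* 0x0001 ^ 0x0002, both from err_sym 2's row */

	/* Walks the x4 table and returns 2 for this syndrome. */
	return decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
}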
d27bf6fa 2093
bfc04aec
BP
2094static int map_err_sym_to_channel(int err_sym, int sym_size)
2095{
2096 if (sym_size == 4)
2097 switch (err_sym) {
2098 case 0x20:
2099 case 0x21:
2100 return 0;
2101 break;
2102 case 0x22:
2103 case 0x23:
2104 return 1;
2105 break;
2106 default:
2107 return err_sym >> 4;
2108 break;
2109 }
2110 /* x8 symbols */
2111 else
2112 switch (err_sym) {
2113 /* imaginary bits not in a DIMM */
2114 case 0x10:
2115 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2116 err_sym);
2117 return -1;
2118 break;
2119
2120 case 0x11:
2121 return 0;
2122 break;
2123 case 0x12:
2124 return 1;
2125 break;
2126 default:
2127 return err_sym >> 3;
2128 break;
2129 }
2130 return -1;
2131}
2132
2133static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2134{
2135 struct amd64_pvt *pvt = mci->pvt_info;
ad6a32e9
BP
2136 int err_sym = -1;
2137
a3b7db09 2138 if (pvt->ecc_sym_sz == 8)
ad6a32e9
BP
2139 err_sym = decode_syndrome(syndrome, x8_vectors,
2140 ARRAY_SIZE(x8_vectors),
a3b7db09
BP
2141 pvt->ecc_sym_sz);
2142 else if (pvt->ecc_sym_sz == 4)
ad6a32e9
BP
2143 err_sym = decode_syndrome(syndrome, x4_vectors,
2144 ARRAY_SIZE(x4_vectors),
a3b7db09 2145 pvt->ecc_sym_sz);
ad6a32e9 2146 else {
a3b7db09 2147 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
ad6a32e9 2148 return err_sym;
bfc04aec 2149 }
ad6a32e9 2150
a3b7db09 2151 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
bfc04aec
BP
2152}
2153
33ca0643
BP
2154static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2155 u8 ecc_type)
d27bf6fa 2156{
33ca0643
BP
2157 enum hw_event_mc_err_type err_type;
2158 const char *string;
d27bf6fa 2159
33ca0643
BP
2160 if (ecc_type == 2)
2161 err_type = HW_EVENT_ERR_CORRECTED;
2162 else if (ecc_type == 1)
2163 err_type = HW_EVENT_ERR_UNCORRECTED;
2164 else {
2165 WARN(1, "Something is rotten in the state of Denmark.\n");
d27bf6fa
DT
2166 return;
2167 }
2168
33ca0643
BP
2169 switch (err->err_code) {
2170 case DECODE_OK:
2171 string = "";
2172 break;
2173 case ERR_NODE:
2174 string = "Failed to map error addr to a node";
2175 break;
2176 case ERR_CSROW:
2177 string = "Failed to map error addr to a csrow";
2178 break;
2179 case ERR_CHANNEL:
2180 string = "unknown syndrome - possible error reporting race";
2181 break;
2182 default:
2183 string = "WTF error";
2184 break;
d27bf6fa 2185 }
33ca0643
BP
2186
2187 edac_mc_handle_error(err_type, mci, 1,
2188 err->page, err->offset, err->syndrome,
2189 err->csrow, err->channel, -1,
2190 string, "");
d27bf6fa
DT
2191}
2192
df781d03 2193static inline void decode_bus_error(int node_id, struct mce *m)
d27bf6fa 2194{
0c510cc8
DB
2195 struct mem_ctl_info *mci;
2196 struct amd64_pvt *pvt;
f192c7b1 2197 u8 ecc_type = (m->status >> 45) & 0x3;
66fed2d4
BP
2198 u8 xec = XEC(m->status, 0x1f);
2199 u16 ec = EC(m->status);
33ca0643
BP
2200 u64 sys_addr;
2201 struct err_info err;
d27bf6fa 2202
0c510cc8
DB
2203 mci = edac_mc_find(node_id);
2204 if (!mci)
2205 return;
2206
2207 pvt = mci->pvt_info;
2208
66fed2d4 2209 /* Bail out early if this was an 'observed' error */
5980bb9c 2210 if (PP(ec) == NBSL_PP_OBS)
b70ef010 2211 return;
d27bf6fa 2212
ecaf5606
BP
2213 /* Do only ECC errors */
2214 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
d27bf6fa 2215 return;
d27bf6fa 2216
33ca0643
BP
2217 memset(&err, 0, sizeof(err));
2218
a4b4bedc 2219 sys_addr = get_error_address(pvt, m);
33ca0643 2220
ecaf5606 2221 if (ecc_type == 2)
33ca0643
BP
2222 err.syndrome = extract_syndrome(m->status);
2223
2224 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2225
2226 __log_bus_error(mci, &err, ecc_type);
d27bf6fa
DT
2227}
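/*
 * Illustrative sketch (not part of the driver): the ecc_type field extracted
 * from the MCA status above lives in bits [46:45]; __log_bus_error() treats 2
 * as a corrected and 1 as an uncorrected ECC error:
 */
static inline u8 example_ecc_type_from_status(u64 mca_status)
{
	/* E.g. a status with bit 46 set and bit 45 clear yields 2 => corrected. */
	return (mca_status >> 45) & 0x3;
}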
2228
0ec449ee 2229/*
8d5b5d9c 2230 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
bbd0c1f6 2231 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
0ec449ee 2232 */
360b7f3c 2233static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
0ec449ee 2234{
0ec449ee 2235 /* Reserve the ADDRESS MAP Device */
8d5b5d9c
BP
2236 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2237 if (!pvt->F1) {
24f9a7fe
BP
2238 amd64_err("error address map device not found: "
2239 "vendor %x device 0x%x (broken BIOS?)\n",
2240 PCI_VENDOR_ID_AMD, f1_id);
bbd0c1f6 2241 return -ENODEV;
0ec449ee
DT
2242 }
2243
2244 /* Reserve the MISC Device */
8d5b5d9c
BP
2245 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2246 if (!pvt->F3) {
2247 pci_dev_put(pvt->F1);
2248 pvt->F1 = NULL;
0ec449ee 2249
24f9a7fe
BP
2250 amd64_err("error F3 device not found: "
2251 "vendor %x device 0x%x (broken BIOS?)\n",
2252 PCI_VENDOR_ID_AMD, f3_id);
0ec449ee 2253
bbd0c1f6 2254 return -ENODEV;
0ec449ee 2255 }
956b9ba1
JP
2256 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2257 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2258 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
0ec449ee
DT
2259
2260 return 0;
2261}
2262
360b7f3c 2263static void free_mc_sibling_devs(struct amd64_pvt *pvt)
0ec449ee 2264{
8d5b5d9c
BP
2265 pci_dev_put(pvt->F1);
2266 pci_dev_put(pvt->F3);
0ec449ee
DT
2267}
2268
2269/*
2270 * Retrieve the hardware registers of the memory controller (this includes the
2271 * 'Address Map' and 'Misc' device regs)
2272 */
360b7f3c 2273static void read_mc_regs(struct amd64_pvt *pvt)
0ec449ee 2274{
a4b4bedc 2275 unsigned range;
0ec449ee 2276 u64 msr_val;
ad6a32e9 2277 u32 tmp;
0ec449ee
DT
2278
2279 /*
2280 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2281 * those are Read-As-Zero
2282 */
e97f8bb8 2283 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
956b9ba1 2284 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
0ec449ee
DT
2285
2286 /* check first whether TOP_MEM2 is enabled */
2287 rdmsrl(MSR_K8_SYSCFG, msr_val);
2288 if (msr_val & (1U << 21)) {
e97f8bb8 2289 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
956b9ba1 2290 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
0ec449ee 2291 } else
956b9ba1 2292 edac_dbg(0, " TOP_MEM2 disabled\n");
0ec449ee 2293
5980bb9c 2294 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
0ec449ee 2295
5a5d2371 2296 read_dram_ctl_register(pvt);
0ec449ee 2297
7f19bf75
BP
2298 for (range = 0; range < DRAM_RANGES; range++) {
2299 u8 rw;
0ec449ee 2300
7f19bf75
BP
2301 /* read settings for this DRAM range */
2302 read_dram_base_limit_regs(pvt, range);
2303
2304 rw = dram_rw(pvt, range);
2305 if (!rw)
2306 continue;
2307
956b9ba1
JP
2308 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2309 range,
2310 get_dram_base(pvt, range),
2311 get_dram_limit(pvt, range));
7f19bf75 2312
956b9ba1
JP
2313 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2314 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2315 (rw & 0x1) ? "R" : "-",
2316 (rw & 0x2) ? "W" : "-",
2317 dram_intlv_sel(pvt, range),
2318 dram_dst_node(pvt, range));
0ec449ee
DT
2319 }
2320
b2b0c605 2321 read_dct_base_mask(pvt);
0ec449ee 2322
bc21fa57 2323 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
7981a28f 2324 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
0ec449ee 2325
8d5b5d9c 2326 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
0ec449ee 2327
7981a28f
AG
2328 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2329 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
0ec449ee 2330
78da121e 2331 if (!dct_ganging_enabled(pvt)) {
7981a28f
AG
2332 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2333 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
0ec449ee 2334 }
ad6a32e9 2335
a3b7db09 2336 pvt->ecc_sym_sz = 4;
a597d2a5
AG
2337 determine_memory_type(pvt);
2338 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
a3b7db09 2339
a4b4bedc 2340 if (pvt->fam >= 0x10) {
b2b0c605 2341 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
7981a28f 2342 /* F16h has only DCT0, so no need to read dbam1 */
a4b4bedc 2343 if (pvt->fam != 0x16)
7981a28f 2344 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
ad6a32e9 2345
a3b7db09 2346 /* F10h, revD and later can do x8 ECC too */
a4b4bedc 2347 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
a3b7db09
BP
2348 pvt->ecc_sym_sz = 8;
2349 }
b2b0c605 2350 dump_misc_regs(pvt);
0ec449ee
DT
2351}
2352
2353/*
2354 * NOTE: CPU Revision Dependent code
2355 *
2356 * Input:
11c75ead 2357 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
0ec449ee
DT
2358 * k8 private pointer to -->
2359 * DRAM Bank Address mapping register
2360 * node_id
2361 * DCL register where dual_channel_active is
2362 *
 2363 * The DBAM register consists of 4 sets of 4-bit fields with the following definitions:
2364 *
2365 * Bits: CSROWs
2366 * 0-3 CSROWs 0 and 1
2367 * 4-7 CSROWs 2 and 3
2368 * 8-11 CSROWs 4 and 5
2369 * 12-15 CSROWs 6 and 7
2370 *
 2371 * Values range from 0 to 15.
 2372 * The meaning of the values depends on CPU revision and dual-channel state;
 2373 * see the relevant BKDG for more info.
2374 *
 2375 * The memory controller provides for a total of only 8 CSROWs in its current
2376 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2377 * single channel or two (2) DIMMs in dual channel mode.
2378 *
2379 * The following code logic collapses the various tables for CSROW based on CPU
2380 * revision.
2381 *
2382 * Returns:
 2383 * The number of PAGE_SIZE pages that the specified CSROW
 2384 * encompasses.
2385 *
2386 */
d1ea71cd 2387static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
0ec449ee 2388{
1433eb99 2389 u32 cs_mode, nr_pages;
f92cae45 2390 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
0ec449ee 2391
10de6497 2392
0ec449ee
DT
2393 /*
 2394 * The csrow_nr / 2 term relies on integer division (1/2 == 0) to collapse
 2395 * a chip-select number into the index of its csrow pair; DBAM_DIMM() then
 2396 * uses that index to pick the corresponding 4-bit field out of the DBAM
 2397 * register.
 2398 */
0a5dfc31 2400 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
0ec449ee 2401
a597d2a5
AG
2402 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
2403 << (20 - PAGE_SHIFT);
0ec449ee 2404
10de6497
BP
2405 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2406 csrow_nr, dct, cs_mode);
2407 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
0ec449ee
DT
2408
2409 return nr_pages;
2410}
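/*
 * Illustrative sketch (not part of the driver), assuming DBAM_DIMM() extracts
 * the 4-bit field of the given csrow pair as laid out in the comment above:
 * for csrow_nr 4 the pair index is 4 / 2 = 2, so the cs_mode nibble comes
 * from DBAM bits 8-11.
 */
static inline u32 example_csrow_nr_pages(u32 dbam, int csrow_nr, u32 mapped_mb)
{
	u32 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	/*
	 * E.g. dbam = 0x00000a00, csrow_nr = 4 -> cs_mode = 0xa. If the
	 * family's dbam_to_cs() mapper reports 2048 MB for that mode, the
	 * result is 2048 << (20 - PAGE_SHIFT) = 524288 pages with 4 KiB pages.
	 */
	edac_dbg(0, "example cs_mode: %d\n", cs_mode);

	return mapped_mb << (20 - PAGE_SHIFT);
}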
2411
2412/*
2413 * Initialize the array of csrow attribute instances, based on the values
2414 * from pci config hardware registers.
2415 */
360b7f3c 2416static int init_csrows(struct mem_ctl_info *mci)
0ec449ee 2417{
10de6497 2418 struct amd64_pvt *pvt = mci->pvt_info;
0ec449ee 2419 struct csrow_info *csrow;
de3910eb 2420 struct dimm_info *dimm;
084a4fcc 2421 enum edac_type edac_mode;
10de6497 2422 int i, j, empty = 1;
a895bf8b 2423 int nr_pages = 0;
10de6497 2424 u32 val;
0ec449ee 2425
a97fa68e 2426 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
0ec449ee 2427
2299ef71 2428 pvt->nbcfg = val;
0ec449ee 2429
956b9ba1
JP
2430 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2431 pvt->mc_node_id, val,
2432 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
0ec449ee 2433
10de6497
BP
2434 /*
2435 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2436 */
11c75ead 2437 for_each_chip_select(i, 0, pvt) {
10de6497
BP
2438 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2439 bool row_dct1 = false;
0ec449ee 2440
a4b4bedc 2441 if (pvt->fam != 0xf)
10de6497
BP
2442 row_dct1 = !!csrow_enabled(i, 1, pvt);
2443
2444 if (!row_dct0 && !row_dct1)
0ec449ee 2445 continue;
0ec449ee 2446
10de6497 2447 csrow = mci->csrows[i];
0ec449ee 2448 empty = 0;
10de6497
BP
2449
2450 edac_dbg(1, "MC node: %d, csrow: %d\n",
2451 pvt->mc_node_id, i);
2452
1eef1282 2453 if (row_dct0) {
d1ea71cd 2454 nr_pages = get_csrow_nr_pages(pvt, 0, i);
1eef1282
MCC
2455 csrow->channels[0]->dimm->nr_pages = nr_pages;
2456 }
11c75ead 2457
10de6497 2458 /* K8 has only one DCT */
a4b4bedc 2459 if (pvt->fam != 0xf && row_dct1) {
d1ea71cd 2460 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
1eef1282
MCC
2461
2462 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2463 nr_pages += row_dct1_pages;
2464 }
0ec449ee 2465
10de6497 2466 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
0ec449ee
DT
2467
2468 /*
2469 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2470 */
a97fa68e 2471 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
084a4fcc
MCC
2472 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2473 EDAC_S4ECD4ED : EDAC_SECDED;
0ec449ee 2474 else
084a4fcc
MCC
2475 edac_mode = EDAC_NONE;
2476
2477 for (j = 0; j < pvt->channel_count; j++) {
de3910eb 2478 dimm = csrow->channels[j]->dimm;
a597d2a5 2479 dimm->mtype = pvt->dram_type;
de3910eb 2480 dimm->edac_mode = edac_mode;
084a4fcc 2481 }
0ec449ee
DT
2482 }
2483
2484 return empty;
2485}
d27bf6fa 2486
f6d6ae96 2487/* get all cores on this DCT */
8b84c8df 2488static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
f6d6ae96
BP
2489{
2490 int cpu;
2491
2492 for_each_online_cpu(cpu)
2493 if (amd_get_nb_id(cpu) == nid)
2494 cpumask_set_cpu(cpu, mask);
2495}
2496
2497/* check MCG_CTL on all the cpus on this node */
d1ea71cd 2498static bool nb_mce_bank_enabled_on_node(u16 nid)
f6d6ae96
BP
2499{
2500 cpumask_var_t mask;
50542251 2501 int cpu, nbe;
f6d6ae96
BP
2502 bool ret = false;
2503
2504 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
24f9a7fe 2505 amd64_warn("%s: Error allocating mask\n", __func__);
f6d6ae96
BP
2506 return false;
2507 }
2508
2509 get_cpus_on_this_dct_cpumask(mask, nid);
2510
f6d6ae96
BP
2511 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2512
2513 for_each_cpu(cpu, mask) {
50542251 2514 struct msr *reg = per_cpu_ptr(msrs, cpu);
5980bb9c 2515 nbe = reg->l & MSR_MCGCTL_NBE;
f6d6ae96 2516
956b9ba1
JP
2517 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2518 cpu, reg->q,
2519 (nbe ? "enabled" : "disabled"));
f6d6ae96
BP
2520
2521 if (!nbe)
2522 goto out;
f6d6ae96
BP
2523 }
2524 ret = true;
2525
2526out:
f6d6ae96
BP
2527 free_cpumask_var(mask);
2528 return ret;
2529}
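/*
 * Illustrative sketch (not part of the driver): the per-core check above tests
 * bit 4 (NbE) of MSR_IA32_MCG_CTL, the same bit that ecc_enabled() later asks
 * the user to set ("set MSR 0x%08x[4] ... to enable"):
 */
static inline bool example_nb_mce_bank_enabled(u32 mcg_ctl_lo)
{
	return !!(mcg_ctl_lo & BIT(4));	/* bit 4 == NbE */
}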
2530
c7e5301a 2531static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
f6d6ae96
BP
2532{
2533 cpumask_var_t cmask;
50542251 2534 int cpu;
f6d6ae96
BP
2535
2536 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
24f9a7fe 2537 amd64_warn("%s: error allocating mask\n", __func__);
f6d6ae96
BP
2538 return false;
2539 }
2540
ae7bb7c6 2541 get_cpus_on_this_dct_cpumask(cmask, nid);
f6d6ae96 2542
f6d6ae96
BP
2543 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2544
2545 for_each_cpu(cpu, cmask) {
2546
50542251
BP
2547 struct msr *reg = per_cpu_ptr(msrs, cpu);
2548
f6d6ae96 2549 if (on) {
5980bb9c 2550 if (reg->l & MSR_MCGCTL_NBE)
ae7bb7c6 2551 s->flags.nb_mce_enable = 1;
f6d6ae96 2552
5980bb9c 2553 reg->l |= MSR_MCGCTL_NBE;
f6d6ae96
BP
2554 } else {
2555 /*
d95cf4de 2556 * Turn off NB MCE reporting only when it was off before
f6d6ae96 2557 */
ae7bb7c6 2558 if (!s->flags.nb_mce_enable)
5980bb9c 2559 reg->l &= ~MSR_MCGCTL_NBE;
f6d6ae96 2560 }
f6d6ae96
BP
2561 }
2562 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2563
f6d6ae96
BP
2564 free_cpumask_var(cmask);
2565
2566 return 0;
2567}
2568
c7e5301a 2569static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2299ef71 2570 struct pci_dev *F3)
f9431992 2571{
2299ef71 2572 bool ret = true;
c9f4f26e 2573 u32 value, mask = 0x3; /* UECC/CECC enable */
f9431992 2574
2299ef71
BP
2575 if (toggle_ecc_err_reporting(s, nid, ON)) {
2576 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2577 return false;
2578 }
2579
c9f4f26e 2580 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2581
ae7bb7c6
BP
2582 s->old_nbctl = value & mask;
2583 s->nbctl_valid = true;
f9431992
DT
2584
2585 value |= mask;
c9f4f26e 2586 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2587
a97fa68e 2588 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2589
956b9ba1
JP
2590 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2591 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2592
a97fa68e 2593 if (!(value & NBCFG_ECC_ENABLE)) {
24f9a7fe 2594 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
f9431992 2595
ae7bb7c6 2596 s->flags.nb_ecc_prev = 0;
d95cf4de 2597
f9431992 2598 /* Attempt to turn on DRAM ECC Enable */
a97fa68e
BP
2599 value |= NBCFG_ECC_ENABLE;
2600 amd64_write_pci_cfg(F3, NBCFG, value);
f9431992 2601
a97fa68e 2602 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2603
a97fa68e 2604 if (!(value & NBCFG_ECC_ENABLE)) {
24f9a7fe
BP
2605 amd64_warn("Hardware rejected DRAM ECC enable,"
2606 "check memory DIMM configuration.\n");
2299ef71 2607 ret = false;
f9431992 2608 } else {
24f9a7fe 2609 amd64_info("Hardware accepted DRAM ECC Enable\n");
f9431992 2610 }
d95cf4de 2611 } else {
ae7bb7c6 2612 s->flags.nb_ecc_prev = 1;
f9431992 2613 }
d95cf4de 2614
956b9ba1
JP
2615 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2616 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2617
2299ef71 2618 return ret;
f9431992
DT
2619}
2620
c7e5301a 2621static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
360b7f3c 2622 struct pci_dev *F3)
f9431992 2623{
c9f4f26e
BP
2624 u32 value, mask = 0x3; /* UECC/CECC enable */
2625
f9431992 2626
ae7bb7c6 2627 if (!s->nbctl_valid)
f9431992
DT
2628 return;
2629
c9f4f26e 2630 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2631 value &= ~mask;
ae7bb7c6 2632 value |= s->old_nbctl;
f9431992 2633
c9f4f26e 2634 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2635
ae7bb7c6
BP
2636 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2637 if (!s->flags.nb_ecc_prev) {
a97fa68e
BP
2638 amd64_read_pci_cfg(F3, NBCFG, &value);
2639 value &= ~NBCFG_ECC_ENABLE;
2640 amd64_write_pci_cfg(F3, NBCFG, value);
d95cf4de
BP
2641 }
2642
2643 /* restore the NB Enable MCGCTL bit */
2299ef71 2644 if (toggle_ecc_err_reporting(s, nid, OFF))
24f9a7fe 2645 amd64_warn("Error restoring NB MCGCTL settings!\n");
f9431992
DT
2646}
2647
2648/*
2299ef71
BP
2649 * EDAC requires that the BIOS have ECC enabled before
2650 * taking over the processing of ECC errors. A command line
 2651 * option allows force-enabling hardware ECC later in
2652 * enable_ecc_error_reporting().
f9431992 2653 */
cab4d277
BP
2654static const char *ecc_msg =
2655 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2656 " Either enable ECC checking or force module loading by setting "
2657 "'ecc_enable_override'.\n"
2658 " (Note that use of the override may cause unknown side effects.)\n";
be3468e8 2659
c7e5301a 2660static bool ecc_enabled(struct pci_dev *F3, u16 nid)
f9431992
DT
2661{
2662 u32 value;
2299ef71 2663 u8 ecc_en = 0;
06724535 2664 bool nb_mce_en = false;
f9431992 2665
a97fa68e 2666 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2667
a97fa68e 2668 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2299ef71 2669 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
f9431992 2670
d1ea71cd 2671 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
06724535 2672 if (!nb_mce_en)
2299ef71
BP
2673 amd64_notice("NB MCE bank disabled, set MSR "
2674 "0x%08x[4] on node %d to enable.\n",
2675 MSR_IA32_MCG_CTL, nid);
f9431992 2676
2299ef71
BP
2677 if (!ecc_en || !nb_mce_en) {
2678 amd64_notice("%s", ecc_msg);
2679 return false;
2680 }
2681 return true;
f9431992
DT
2682}
2683
df71a053
BP
2684static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2685 struct amd64_family_type *fam)
7d6034d3
DT
2686{
2687 struct amd64_pvt *pvt = mci->pvt_info;
2688
2689 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2690 mci->edac_ctl_cap = EDAC_FLAG_NONE;
7d6034d3 2691
5980bb9c 2692 if (pvt->nbcap & NBCAP_SECDED)
7d6034d3
DT
2693 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2694
5980bb9c 2695 if (pvt->nbcap & NBCAP_CHIPKILL)
7d6034d3
DT
2696 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2697
d1ea71cd 2698 mci->edac_cap = determine_edac_cap(pvt);
7d6034d3
DT
2699 mci->mod_name = EDAC_MOD_STR;
2700 mci->mod_ver = EDAC_AMD64_VERSION;
df71a053 2701 mci->ctl_name = fam->ctl_name;
8d5b5d9c 2702 mci->dev_name = pci_name(pvt->F2);
7d6034d3
DT
2703 mci->ctl_page_to_phys = NULL;
2704
7d6034d3 2705 /* memory scrubber interface */
d1ea71cd
BP
2706 mci->set_sdram_scrub_rate = set_scrub_rate;
2707 mci->get_sdram_scrub_rate = get_scrub_rate;
7d6034d3
DT
2708}
2709
0092b20d
BP
2710/*
2711 * returns a pointer to the family descriptor on success, NULL otherwise.
2712 */
d1ea71cd 2713static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
395ae783 2714{
0092b20d
BP
2715 struct amd64_family_type *fam_type = NULL;
2716
18b94f66 2717 pvt->ext_model = boot_cpu_data.x86_model >> 4;
a4b4bedc 2718 pvt->stepping = boot_cpu_data.x86_mask;
18b94f66
AG
2719 pvt->model = boot_cpu_data.x86_model;
2720 pvt->fam = boot_cpu_data.x86;
2721
2722 switch (pvt->fam) {
395ae783 2723 case 0xf:
d1ea71cd
BP
2724 fam_type = &family_types[K8_CPUS];
2725 pvt->ops = &family_types[K8_CPUS].ops;
395ae783 2726 break;
df71a053 2727
395ae783 2728 case 0x10:
d1ea71cd
BP
2729 fam_type = &family_types[F10_CPUS];
2730 pvt->ops = &family_types[F10_CPUS].ops;
df71a053
BP
2731 break;
2732
2733 case 0x15:
18b94f66 2734 if (pvt->model == 0x30) {
d1ea71cd
BP
2735 fam_type = &family_types[F15_M30H_CPUS];
2736 pvt->ops = &family_types[F15_M30H_CPUS].ops;
18b94f66 2737 break;
a597d2a5
AG
2738 } else if (pvt->model == 0x60) {
2739 fam_type = &family_types[F15_M60H_CPUS];
2740 pvt->ops = &family_types[F15_M60H_CPUS].ops;
2741 break;
18b94f66
AG
2742 }
2743
d1ea71cd
BP
2744 fam_type = &family_types[F15_CPUS];
2745 pvt->ops = &family_types[F15_CPUS].ops;
395ae783
BP
2746 break;
2747
94c1acf2 2748 case 0x16:
85a8885b
AG
2749 if (pvt->model == 0x30) {
2750 fam_type = &family_types[F16_M30H_CPUS];
2751 pvt->ops = &family_types[F16_M30H_CPUS].ops;
2752 break;
2753 }
d1ea71cd
BP
2754 fam_type = &family_types[F16_CPUS];
2755 pvt->ops = &family_types[F16_CPUS].ops;
94c1acf2
AG
2756 break;
2757
395ae783 2758 default:
24f9a7fe 2759 amd64_err("Unsupported family!\n");
0092b20d 2760 return NULL;
395ae783 2761 }
0092b20d 2762
df71a053 2763 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
18b94f66 2764 (pvt->fam == 0xf ?
24f9a7fe
BP
2765 (pvt->ext_model >= K8_REV_F ? "revF or later "
2766 : "revE or earlier ")
2767 : ""), pvt->mc_node_id);
0092b20d 2768 return fam_type;
395ae783
BP
2769}
2770
e339f1ec
TI
2771static const struct attribute_group *amd64_edac_attr_groups[] = {
2772#ifdef CONFIG_EDAC_DEBUG
2773 &amd64_edac_dbg_group,
2774#endif
2775#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
2776 &amd64_edac_inj_group,
2777#endif
2778 NULL
2779};
2780
d1ea71cd 2781static int init_one_instance(struct pci_dev *F2)
7d6034d3
DT
2782{
2783 struct amd64_pvt *pvt = NULL;
0092b20d 2784 struct amd64_family_type *fam_type = NULL;
360b7f3c 2785 struct mem_ctl_info *mci = NULL;
ab5a503c 2786 struct edac_mc_layer layers[2];
7d6034d3 2787 int err = 0, ret;
1a6775c1 2788 u16 nid = amd_pci_dev_to_node_id(F2);
7d6034d3
DT
2789
2790 ret = -ENOMEM;
2791 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2792 if (!pvt)
360b7f3c 2793 goto err_ret;
7d6034d3 2794
360b7f3c 2795 pvt->mc_node_id = nid;
8d5b5d9c 2796 pvt->F2 = F2;
7d6034d3 2797
395ae783 2798 ret = -EINVAL;
d1ea71cd 2799 fam_type = per_family_init(pvt);
0092b20d 2800 if (!fam_type)
395ae783
BP
2801 goto err_free;
2802
7d6034d3 2803 ret = -ENODEV;
360b7f3c 2804 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
7d6034d3
DT
2805 if (err)
2806 goto err_free;
2807
360b7f3c 2808 read_mc_regs(pvt);
7d6034d3 2809
7d6034d3
DT
2810 /*
2811 * We need to determine how many memory channels there are. Then use
2812 * that information for calculating the size of the dynamic instance
360b7f3c 2813 * tables in the 'mci' structure.
7d6034d3 2814 */
360b7f3c 2815 ret = -EINVAL;
7d6034d3
DT
2816 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2817 if (pvt->channel_count < 0)
360b7f3c 2818 goto err_siblings;
7d6034d3
DT
2819
2820 ret = -ENOMEM;
ab5a503c
MCC
2821 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2822 layers[0].size = pvt->csels[0].b_cnt;
2823 layers[0].is_virt_csrow = true;
2824 layers[1].type = EDAC_MC_LAYER_CHANNEL;
f0a56c48
BP
2825
2826 /*
2827 * Always allocate two channels since we can have setups with DIMMs on
2828 * only one channel. Also, this simplifies handling later for the price
2829 * of a couple of KBs tops.
2830 */
2831 layers[1].size = 2;
ab5a503c 2832 layers[1].is_virt_csrow = false;
f0a56c48 2833
ca0907b9 2834 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
7d6034d3 2835 if (!mci)
360b7f3c 2836 goto err_siblings;
7d6034d3
DT
2837
2838 mci->pvt_info = pvt;
fd687502 2839 mci->pdev = &pvt->F2->dev;
7d6034d3 2840
df71a053 2841 setup_mci_misc_attrs(mci, fam_type);
360b7f3c
BP
2842
2843 if (init_csrows(mci))
7d6034d3
DT
2844 mci->edac_cap = EDAC_FLAG_NONE;
2845
7d6034d3 2846 ret = -ENODEV;
e339f1ec 2847 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
956b9ba1 2848 edac_dbg(1, "failed edac_mc_add_mc()\n");
7d6034d3
DT
2849 goto err_add_mc;
2850 }
2851
549d042d
BP
2852 /* register stuff with EDAC MCE */
2853 if (report_gart_errors)
2854 amd_report_gart_errors(true);
2855
df781d03 2856 amd_register_ecc_decoder(decode_bus_error);
549d042d 2857
360b7f3c
BP
2858 atomic_inc(&drv_instances);
2859
7d6034d3
DT
2860 return 0;
2861
2862err_add_mc:
2863 edac_mc_free(mci);
2864
360b7f3c
BP
2865err_siblings:
2866 free_mc_sibling_devs(pvt);
7d6034d3 2867
360b7f3c
BP
2868err_free:
2869 kfree(pvt);
7d6034d3 2870
360b7f3c 2871err_ret:
7d6034d3
DT
2872 return ret;
2873}
2874
d1ea71cd
BP
2875static int probe_one_instance(struct pci_dev *pdev,
2876 const struct pci_device_id *mc_type)
7d6034d3 2877{
1a6775c1 2878 u16 nid = amd_pci_dev_to_node_id(pdev);
2299ef71 2879 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
ae7bb7c6 2880 struct ecc_settings *s;
2299ef71 2881 int ret = 0;
7d6034d3 2882
7d6034d3 2883 ret = pci_enable_device(pdev);
b8cfa02f 2884 if (ret < 0) {
956b9ba1 2885 edac_dbg(0, "ret=%d\n", ret);
b8cfa02f
BP
2886 return -EIO;
2887 }
7d6034d3 2888
ae7bb7c6
BP
2889 ret = -ENOMEM;
2890 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2891 if (!s)
2299ef71 2892 goto err_out;
ae7bb7c6
BP
2893
2894 ecc_stngs[nid] = s;
2895
2299ef71
BP
2896 if (!ecc_enabled(F3, nid)) {
2897 ret = -ENODEV;
2898
2899 if (!ecc_enable_override)
2900 goto err_enable;
2901
2902 amd64_warn("Forcing ECC on!\n");
2903
2904 if (!enable_ecc_error_reporting(s, nid, F3))
2905 goto err_enable;
2906 }
2907
d1ea71cd 2908 ret = init_one_instance(pdev);
360b7f3c 2909 if (ret < 0) {
ae7bb7c6 2910 amd64_err("Error probing instance: %d\n", nid);
360b7f3c
BP
2911 restore_ecc_error_reporting(s, nid, F3);
2912 }
7d6034d3
DT
2913
2914 return ret;
2299ef71
BP
2915
2916err_enable:
2917 kfree(s);
2918 ecc_stngs[nid] = NULL;
2919
2920err_out:
2921 return ret;
7d6034d3
DT
2922}
2923
d1ea71cd 2924static void remove_one_instance(struct pci_dev *pdev)
7d6034d3
DT
2925{
2926 struct mem_ctl_info *mci;
2927 struct amd64_pvt *pvt;
1a6775c1 2928 u16 nid = amd_pci_dev_to_node_id(pdev);
360b7f3c
BP
2929 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2930 struct ecc_settings *s = ecc_stngs[nid];
7d6034d3 2931
c5608759 2932 mci = find_mci_by_dev(&pdev->dev);
a4b4bedc
BP
2933 WARN_ON(!mci);
2934
7d6034d3
DT
2935 /* Remove from EDAC CORE tracking list */
2936 mci = edac_mc_del_mc(&pdev->dev);
2937 if (!mci)
2938 return;
2939
2940 pvt = mci->pvt_info;
2941
360b7f3c 2942 restore_ecc_error_reporting(s, nid, F3);
7d6034d3 2943
360b7f3c 2944 free_mc_sibling_devs(pvt);
7d6034d3 2945
549d042d
BP
2946 /* unregister from EDAC MCE */
2947 amd_report_gart_errors(false);
df781d03 2948 amd_unregister_ecc_decoder(decode_bus_error);
549d042d 2949
360b7f3c
BP
2950 kfree(ecc_stngs[nid]);
2951 ecc_stngs[nid] = NULL;
ae7bb7c6 2952
7d6034d3 2953 /* Free the EDAC CORE resources */
8f68ed97 2954 mci->pvt_info = NULL;
8f68ed97
BP
2955
2956 kfree(pvt);
7d6034d3
DT
2957 edac_mc_free(mci);
2958}
2959
2960/*
2961 * This table is part of the interface for loading drivers for PCI devices. The
2962 * PCI core identifies what devices are on a system during boot, and then
 2963 * queries this table to see whether this driver handles a given device it found.
2964 */
ba935f40 2965static const struct pci_device_id amd64_pci_table[] = {
a597d2a5
AG
2966 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
2967 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
2968 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
2969 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
2970 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
2971 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
2972 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
7d6034d3
DT
2973 {0, }
2974};
2975MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2976
2977static struct pci_driver amd64_pci_driver = {
2978 .name = EDAC_MOD_STR,
d1ea71cd
BP
2979 .probe = probe_one_instance,
2980 .remove = remove_one_instance,
7d6034d3 2981 .id_table = amd64_pci_table,
735c0f8f 2982 .driver.probe_type = PROBE_FORCE_SYNCHRONOUS,
7d6034d3
DT
2983};
2984
360b7f3c 2985static void setup_pci_device(void)
7d6034d3
DT
2986{
2987 struct mem_ctl_info *mci;
2988 struct amd64_pvt *pvt;
2989
d1ea71cd 2990 if (pci_ctl)
7d6034d3
DT
2991 return;
2992
2ec591ac 2993 mci = edac_mc_find(0);
d1ea71cd
BP
2994 if (!mci)
2995 return;
7d6034d3 2996
d1ea71cd
BP
2997 pvt = mci->pvt_info;
2998 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2999 if (!pci_ctl) {
3000 pr_warn("%s(): Unable to create PCI control\n", __func__);
3001 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
7d6034d3
DT
3002 }
3003}
3004
3005static int __init amd64_edac_init(void)
3006{
360b7f3c 3007 int err = -ENODEV;
7d6034d3 3008
df71a053 3009 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
7d6034d3
DT
3010
3011 opstate_init();
3012
9653a5c7 3013 if (amd_cache_northbridges() < 0)
56b34b91 3014 goto err_ret;
7d6034d3 3015
cc4d8860 3016 err = -ENOMEM;
ae7bb7c6 3017 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2ec591ac 3018 if (!ecc_stngs)
a9f0fbe2 3019 goto err_free;
cc4d8860 3020
50542251 3021 msrs = msrs_alloc();
56b34b91 3022 if (!msrs)
360b7f3c 3023 goto err_free;
50542251 3024
7d6034d3
DT
3025 err = pci_register_driver(&amd64_pci_driver);
3026 if (err)
56b34b91 3027 goto err_pci;
7d6034d3 3028
56b34b91 3029 err = -ENODEV;
360b7f3c
BP
3030 if (!atomic_read(&drv_instances))
3031 goto err_no_instances;
7d6034d3 3032
360b7f3c 3033 setup_pci_device();
f5b10c45
TP
3034
3035#ifdef CONFIG_X86_32
3036 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3037#endif
3038
360b7f3c 3039 return 0;
7d6034d3 3040
360b7f3c 3041err_no_instances:
7d6034d3 3042 pci_unregister_driver(&amd64_pci_driver);
cc4d8860 3043
56b34b91
BP
3044err_pci:
3045 msrs_free(msrs);
3046 msrs = NULL;
cc4d8860 3047
360b7f3c 3048err_free:
360b7f3c
BP
3049 kfree(ecc_stngs);
3050 ecc_stngs = NULL;
3051
56b34b91 3052err_ret:
7d6034d3
DT
3053 return err;
3054}
3055
3056static void __exit amd64_edac_exit(void)
3057{
d1ea71cd
BP
3058 if (pci_ctl)
3059 edac_pci_release_generic_ctl(pci_ctl);
7d6034d3
DT
3060
3061 pci_unregister_driver(&amd64_pci_driver);
50542251 3062
ae7bb7c6
BP
3063 kfree(ecc_stngs);
3064 ecc_stngs = NULL;
3065
50542251
BP
3066 msrs_free(msrs);
3067 msrs = NULL;
7d6034d3
DT
3068}
3069
3070module_init(amd64_edac_init);
3071module_exit(amd64_edac_exit);
3072
3073MODULE_LICENSE("GPL");
3074MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3075 "Dave Peterson, Thayne Harbaugh");
3076MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3077 EDAC_AMD64_VERSION);
3078
3079module_param(edac_op_state, int, 0444);
3080MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");