/* drivers/edac/amd64_edac.c */
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching
 * or higher' value.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
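/*
 * Note: scrubrates[] is sorted in order of decreasing bandwidth and is
 * terminated by the { 0x00, 0 } "scrubbing off" sentinel, which the
 * rate-setting loop below falls back to when no entry is slow enough.
 */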

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
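/*
 * These two helpers are normally reached via the amd64_read_pci_cfg() /
 * amd64_write_pci_cfg() wrapper macros (see amd64_edac.h), which pass
 * __func__ along so that failed accesses are attributed to their caller.
 */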

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
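/*
 * Note: on model 0x30 the DctCfgSel field is two bits wide (the DCT read
 * path below passes dct = 3 there), so both bits are cleared before the
 * new value is ORed in; other F15h models use a single select bit.
 */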

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel].
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
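/*
 * E.g. on F10h, amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &val) adds 0x100
 * to the offset and thus reads F2x180 (DBAM1), i.e. DCT1's copy of the
 * register.
 */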

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
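/*
 * E.g. a scrubval of 0x9 is written out as 0x4 with the enable bit set;
 * any value outside 0x5-0x14 simply disables scrubbing.
 */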
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
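/*
 * Example: a request of 700000000 bytes/sec walks past the 1600000000
 * and 800000000 entries and stops at { 0x03, 400000000 }: the closest
 * rate not above the request gets programmed and 400000000 is returned.
 */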

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	switch (pvt->fam) {
	case 0x15:
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		break;

	case 0x17:
	case 0x18:
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		} else {
			scrubval = 0;
		}
		break;

	default:
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
		break;
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
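/*
 * set_scrub_rate()/get_scrub_rate() are hooked up as the mem_ctl_info
 * scrub callbacks, which the EDAC core exposes to userspace through the
 * MC's sdram_scrub_rate sysfs attribute.
 */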

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
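/*
 * With node interleaving enabled, IntlvEn acts as a mask on SysAddr bits
 * [14:12]: e.g. intlv_en == 0x03 interleaves across four nodes, and the
 * owning node is found by matching bits [13:12] against each range's
 * IntlvSel field.
 */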

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
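/*
 * A csrow then claims an address iff (addr & ~mask) == (base & ~mask);
 * see input_addr_to_csrow() below, which inverts the returned mask
 * before comparing.
 */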

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
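/*
 * IntlvEn is a contiguous low-order bit mask, so its only meaningful
 * values are 0, 1, 3 and 7, yielding 0, 1, 2 and 3 interleave bits
 * (1-, 2-, 4- and 8-node interleaving) respectively.
 */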

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
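/*
 * I.e. the node-interleave bits are squeezed out of bits [35:12] while
 * the page offset stays put: with intlv_shift == 1, DramAddr 0x2345
 * becomes InputAddr 0x1345.
 */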

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}

		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 4; dimm++) {
		size0 = 0;
		cs0 = dimm * 2;

		if (csrow_enabled(cs0, ctrl, pvt))
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);

		size1 = 0;
		cs1 = dimm * 2 + 1;

		if (csrow_enabled(cs1, ctrl, pvt))
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   cs0, size0,
			   cs1, size1);
	}
}

static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for (i = 0; i < NUM_UMCS; i++) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
			 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
			 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
			 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
				 i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}

/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using %s syndromes.\n",
		   ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
}

/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;

	prep_chip_selects(pvt);

	if (pvt->umc) {
		base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
		base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
		mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
		mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
	} else {
		base_reg0 = DCSB0;
		base_reg1 = DCSB1;
		mask_reg0 = DCSM0;
		mask_reg1 = DCSM1;
	}

	for_each_chip_select(cs, 0, pvt) {
		int reg0 = base_reg0 + (cs * 4);
		int reg1 = base_reg1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (pvt->umc) {
			if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
					 cs, *base0, reg0);

			if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
					 cs, *base1, reg1);
		} else {
			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
					 cs, *base0, reg0);

			if (pvt->fam == 0xf)
				continue;

			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
					 cs, *base1, (pvt->fam == 0x10) ? reg1
									: reg0);
		}
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0 = mask_reg0 + (cs * 4);
		int reg1 = mask_reg1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (pvt->umc) {
			if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
				edac_dbg(0, "  DCSM0[%d]=0x%08x reg: 0x%x\n",
					 cs, *mask0, reg0);

			if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
				edac_dbg(0, "  DCSM1[%d]=0x%08x reg: 0x%x\n",
					 cs, *mask1, reg1);
		} else {
			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
				edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
					 cs, *mask0, reg0);

			if (pvt->fam == 0xf)
				continue;

			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
				edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
					 cs, *mask1, (pvt->fam == 0x10) ? reg1
									: reg0);
		}
	}
}

static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	case 0x17:
	case 0x18:
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
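/*
 * E.g. cs_mode 5 on a 128-bit wide DCT: shift = (5 + 1) >> 1 = 3, so
 * ddr2_cs_size() reports 128 << (3 + 1) = 2048MB for that chip select.
 */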

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for (i = 0; i < NUM_UMCS; i++)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15hM60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];

	/* Each mask is used for every two base addresses. */
	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);

	/* Return size in MBs. */
	return size >> 10;
}
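/*
 * (addr_mask >> 1) - (base_addr >> 1) + 1 counts 512-byte units, since
 * register bit 1 maps to address bit 9; one more shift right yields kB
 * and the final >> 10 yields MB. E.g. a 4GB chip select spans 2^23 such
 * units -> 4194304kB -> 4096MB.
 */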

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G
 * boundary is swapped with a region located at the bottom of memory so that
 * the GPU can use the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
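/*
 * Note: swap_base/swap_limit/rgn_size above are in 128MB (1 << 27) units,
 * and only addresses below 16GB (sys_addr >> 34 == 0) are considered; the
 * XOR with swap_base << 27 exchanges the framebuffer region with the region
 * at the bottom of memory.
 */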

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en	= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel		= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of DCTs that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCTs, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
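/*
 * In the interleave removal above, two interleaved DCTs squeeze one
 * channel-select bit (A[8] for intlv_addr 0x4, A[9] for 0x5) out of the
 * address; four interleaved DCTs squeeze out two bits.
 */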

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
			   pvt->csels[1].csbases :
			   pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For F15m60h, we need multiplier for LRDIMM cs_size
			 * calculation. We pass dimm value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}

static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_base_addr_to_cs_size,
		}
	},
	[F17_M10H_CPUS] = {
		.ctl_name = "F17h_M10h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_base_addr_to_cs_size,
		}
	},
};

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}

static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}

static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}

static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 */
static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
{
	u32 umc_instance_id[] = {0x50f00, 0x150f00};
	u32 instance_id = m->ipid & GENMASK(31, 0);
	int i, channel = -1;

	for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
		if (umc_instance_id[i] == instance_id)
			channel = i;

	return channel;
}
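/*
 * Note: the two instance IDs above correspond to UMC0 and UMC1, so the
 * matching array index doubles as the channel number.
 */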

static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(pvt, m);
	if (err.channel < 0) {
		err.err_code = ERR_CHANNEL;
		goto log_error;
	}

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
			return -ENODEV;
		}

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
		return -ENODEV;
	}

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}

static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for (i = 0; i < NUM_UMCS; i++) {
			/* Check enabled channels only: */
			if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
			    (pvt->umc[i].ecc_ctrl & BIT(7))) {
				pvt->ecc_sym_sz = 8;
				break;
			}
		}

		return;
	}

	if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}

/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for (i = 0; i < NUM_UMCS; i++) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, " TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);

	dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
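/*
 * Worked example, assuming the usual DBAM_DIMM(dimm, dbam) accessor which
 * extracts nibble 'dimm' of the register: with DBAM = 0x00003210, DIMM 2
 * (CSROWs 4 and 5) yields cs_mode 2, which dbam_to_cs() then translates
 * into a chip-select size according to the family-specific BKDG table.
 */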
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc)
		csrow_nr >>= 1;

	cs_mode = DBAM_DIMM(csrow_nr, dbam);

	/* dbam_to_cs() returns the chip-select size in MB; convert to pages. */
	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
	nr_pages <<= 20 - PAGE_SHIFT;

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (!pvt->umc) {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

		pvt->nbcfg = val;

		edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
			 pvt->mc_node_id, val,
			 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
	}

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->umc) {
			if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
				edac_mode = EDAC_S4ECD4ED;
			else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
				edac_mode = EDAC_SECDED;

		} else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows forcibly enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			u32 base = get_umc_base(i);

			/* Only check enabled UMCs. */
			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
				continue;

			if (!(value & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
				continue;

			if (value & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				 MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("Node %d: DRAM ECC %s.\n",
		   nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en) {
		amd64_info("%s", ecc_msg);
		return false;
	}
	return true;
}

static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1;

	for (i = 0; i < NUM_UMCS; i++) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (cpk_en)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}
}

static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		}
		/* fall through */
	case 0x18:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;

		if (pvt->fam == 0x18)
			family_types[F17_CPUS].ctl_name = "F18h";
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}

static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};

static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	u16 pci_id1, pci_id2;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc) {
			ret = -ENOMEM;
			goto err_free;
		}

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (err)
		goto err_post_init;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_post_init:
	if (pvt->fam >= 0x17)
		kfree(pvt->umc);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}

static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	if (pvt->umc)
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
	else
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}

static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);

	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");