// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT	0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT	0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT	0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT	0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT	0x14e8
#define PCI_DEVICE_ID_AMD_MI200_ROOT	0x14bb

#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
#define PCI_DEVICE_ID_AMD_MI200_DF_F4	0x14d4

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

/* Each entry is { bus, device base, device limit }. */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

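/*
 * Illustrative usage sketch (not part of the original file): look up a
 * node's northbridge and read a register from its "misc" (F3) function.
 * The 0x9c offset is the GART flush-word register used elsewhere below.
 *
 *	struct amd_northbridge *nb = node_to_amd_nb(node);
 *	u32 val;
 *
 *	if (nb && nb->misc)
 *		pci_read_config_dword(nb->misc, 0x9c, &val);
 */
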
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

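/*
 * Usage sketch (illustrative only; MY_SMN_REG and MY_FLAG are made-up
 * placeholders, not real SMN addresses): a read-modify-write of an SMN
 * register on node 0.
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, MY_SMN_REG, &val)) {
 *		val |= MY_FLAG;
 *		amd_smn_write(0, MY_SMN_REG, val);
 *	}
 */
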
static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

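/*
 * Illustrative early-boot usage (not from this file): code that scans the
 * bus by hand can pass the raw vendor/device dword (vendor ID in the low
 * 16 bits) straight in, e.g. via the early read_pci_config() helper:
 *
 *	if (early_is_amd_nb(read_pci_config(0, slot, 3, PCI_VENDOR_ID)))
 *		...;
 */
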
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

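/*
 * Usage sketch (illustrative only): callers hand in a caller-owned
 * struct resource and get it back filled in, or NULL when MMCONFIG is
 * unavailable:
 *
 *	struct resource mmconf;
 *
 *	if (amd_get_mmconfig_range(&mmconf))
 *		pr_info("ECAM window: %pR\n", &mmconf);
 */
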
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

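/*
 * Usage sketch (illustrative only): read the 4-bit subcache-enable mask
 * for a CPU's compute unit and, if any subcache is off, turn all four
 * back on:
 *
 *	int mask = amd_get_subcaches(cpu);
 *
 *	if (mask >= 0 && mask != 0xf)
 *		amd_set_subcaches(cpu, 0xf);
 */
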
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

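/*
 * Note on intended use (hedged): callers such as the legacy GART IOMMU
 * are expected to update the GART page-table entries first, then call
 * amd_flush_garts() so that every cached northbridge re-reads them.
 */
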
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);