/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
a32073bf | 5 | #include <linux/types.h> |
5a0e3ad6 | 6 | #include <linux/slab.h> |
a32073bf AK |
7 | #include <linux/init.h> |
8 | #include <linux/errno.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/spinlock.h> | |
23ac4ae8 | 11 | #include <asm/amd_nb.h> |
a32073bf | 12 | |
/*
 * Per-node GART flush words cached from PCI config reg 0x9c by
 * amd_cache_gart(); written back (with bit 0 set) by amd_flush_garts().
 */
static u32 *flush_words;
/*
 * PCI IDs of the "misc control" devices of all supported AMD northbridge
 * generations; used to enumerate nodes in amd_cache_northbridges().
 */
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
41b2610c HR |
23 | static struct pci_device_id amd_nb_link_ids[] = { |
24 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) }, | |
25 | {} | |
26 | }; | |
27 | ||
/*
 * Bus/device ranges scanned for northbridge devices during early setup.
 * Each entry appears to be { bus, first dev, dev limit } -- presumably
 * matching the field order of struct amd_nb_bus_dev_range; confirm
 * against its definition in <asm/amd_nb.h>.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
34 | ||
/*
 * Global bookkeeping for all detected northbridges: node count, feature
 * flags, and per-node misc/link PCI devices.  Filled in by
 * amd_cache_northbridges().
 */
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
a32073bf | 37 | |
9653a5c7 | 38 | static struct pci_dev *next_northbridge(struct pci_dev *dev, |
691269f0 | 39 | const struct pci_device_id *ids) |
a32073bf AK |
40 | { |
41 | do { | |
42 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | |
43 | if (!dev) | |
44 | break; | |
9653a5c7 | 45 | } while (!pci_match_id(ids, dev)); |
a32073bf AK |
46 | return dev; |
47 | } | |
48 | ||
9653a5c7 | 49 | int amd_cache_northbridges(void) |
a32073bf | 50 | { |
9653a5c7 HR |
51 | int i = 0; |
52 | struct amd_northbridge *nb; | |
41b2610c | 53 | struct pci_dev *misc, *link; |
3c6df2a9 | 54 | |
9653a5c7 | 55 | if (amd_nb_num()) |
a32073bf AK |
56 | return 0; |
57 | ||
9653a5c7 HR |
58 | misc = NULL; |
59 | while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL) | |
60 | i++; | |
900f9ac9 | 61 | |
9653a5c7 HR |
62 | if (i == 0) |
63 | return 0; | |
a32073bf | 64 | |
9653a5c7 HR |
65 | nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL); |
66 | if (!nb) | |
a32073bf AK |
67 | return -ENOMEM; |
68 | ||
9653a5c7 HR |
69 | amd_northbridges.nb = nb; |
70 | amd_northbridges.num = i; | |
3c6df2a9 | 71 | |
41b2610c | 72 | link = misc = NULL; |
9653a5c7 HR |
73 | for (i = 0; i != amd_nb_num(); i++) { |
74 | node_to_amd_nb(i)->misc = misc = | |
75 | next_northbridge(misc, amd_nb_misc_ids); | |
41b2610c HR |
76 | node_to_amd_nb(i)->link = link = |
77 | next_northbridge(link, amd_nb_link_ids); | |
9653a5c7 HR |
78 | } |
79 | ||
80 | /* some CPU families (e.g. family 0x11) do not support GART */ | |
81 | if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || | |
82 | boot_cpu_data.x86 == 0x15) | |
83 | amd_northbridges.flags |= AMD_NB_GART; | |
a32073bf | 84 | |
f658bcfb HR |
85 | /* |
86 | * Some CPU families support L3 Cache Index Disable. There are some | |
87 | * limitations because of E382 and E388 on family 0x10. | |
88 | */ | |
89 | if (boot_cpu_data.x86 == 0x10 && | |
90 | boot_cpu_data.x86_model >= 0x8 && | |
91 | (boot_cpu_data.x86_model > 0x9 || | |
92 | boot_cpu_data.x86_mask >= 0x1)) | |
93 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | |
94 | ||
b453de02 HR |
95 | if (boot_cpu_data.x86 == 0x15) |
96 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | |
97 | ||
cabb5bd7 HR |
98 | /* L3 cache partitioning is supported on family 0x15 */ |
99 | if (boot_cpu_data.x86 == 0x15) | |
100 | amd_northbridges.flags |= AMD_NB_L3_PARTITIONING; | |
101 | ||
a32073bf AK |
102 | return 0; |
103 | } | |
9653a5c7 | 104 | EXPORT_SYMBOL_GPL(amd_cache_northbridges); |
a32073bf AK |
105 | |
106 | /* Ignores subdevice/subvendor but as far as I can figure out | |
107 | they're useless anyways */ | |
eec1d4fa | 108 | int __init early_is_amd_nb(u32 device) |
a32073bf | 109 | { |
691269f0 | 110 | const struct pci_device_id *id; |
a32073bf | 111 | u32 vendor = device & 0xffff; |
691269f0 | 112 | |
a32073bf | 113 | device >>= 16; |
9653a5c7 | 114 | for (id = amd_nb_misc_ids; id->vendor; id++) |
a32073bf AK |
115 | if (vendor == id->vendor && device == id->device) |
116 | return 1; | |
117 | return 0; | |
118 | } | |
119 | ||
cabb5bd7 HR |
120 | int amd_get_subcaches(int cpu) |
121 | { | |
122 | struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; | |
123 | unsigned int mask; | |
124 | int cuid = 0; | |
125 | ||
126 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | |
127 | return 0; | |
128 | ||
129 | pci_read_config_dword(link, 0x1d4, &mask); | |
130 | ||
131 | #ifdef CONFIG_SMP | |
132 | cuid = cpu_data(cpu).compute_unit_id; | |
133 | #endif | |
134 | return (mask >> (4 * cuid)) & 0xf; | |
135 | } | |
136 | ||
/*
 * Restrict @cpu's compute unit to the L3 subcaches selected by the
 * 4-bit @mask.  Returns 0 on success, -EINVAL when L3 partitioning is
 * unsupported or @mask has bits set outside 0xf.
 */
int amd_set_subcaches(int cpu, int mask)
{
	/* One-shot snapshot of the boot-time register state (taken below). */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		/* NOTE(review): 0x180000 presumably masks the BAN-mode bits
		   of reg 0x1b8 -- confirm against the family 0x15 BKDG. */
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	/* Shift @mask into this compute unit's 4-bit field of reg 0x1d4. */
	mask <<= 4 * cuid;
	/* NOTE(review): bits 26+ appear to be per-unit control bits for the
	   other compute units; confirm the exact semantics in the BKDG. */
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
178 | ||
9653a5c7 HR |
179 | int amd_cache_gart(void) |
180 | { | |
181 | int i; | |
182 | ||
183 | if (!amd_nb_has_feature(AMD_NB_GART)) | |
184 | return 0; | |
185 | ||
186 | flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL); | |
187 | if (!flush_words) { | |
188 | amd_northbridges.flags &= ~AMD_NB_GART; | |
189 | return -ENOMEM; | |
190 | } | |
191 | ||
192 | for (i = 0; i != amd_nb_num(); i++) | |
193 | pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, | |
194 | &flush_words[i]); | |
195 | ||
196 | return 0; | |
197 | } | |
198 | ||
eec1d4fa | 199 | void amd_flush_garts(void) |
a32073bf AK |
200 | { |
201 | int flushed, i; | |
202 | unsigned long flags; | |
203 | static DEFINE_SPINLOCK(gart_lock); | |
204 | ||
9653a5c7 | 205 | if (!amd_nb_has_feature(AMD_NB_GART)) |
900f9ac9 AH |
206 | return; |
207 | ||
a32073bf AK |
208 | /* Avoid races between AGP and IOMMU. In theory it's not needed |
209 | but I'm not sure if the hardware won't lose flush requests | |
210 | when another is pending. This whole thing is so expensive anyways | |
211 | that it doesn't matter to serialize more. -AK */ | |
212 | spin_lock_irqsave(&gart_lock, flags); | |
213 | flushed = 0; | |
9653a5c7 HR |
214 | for (i = 0; i < amd_nb_num(); i++) { |
215 | pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c, | |
216 | flush_words[i] | 1); | |
a32073bf AK |
217 | flushed++; |
218 | } | |
9653a5c7 | 219 | for (i = 0; i < amd_nb_num(); i++) { |
a32073bf AK |
220 | u32 w; |
221 | /* Make sure the hardware actually executed the flush*/ | |
222 | for (;;) { | |
9653a5c7 | 223 | pci_read_config_dword(node_to_amd_nb(i)->misc, |
a32073bf AK |
224 | 0x9c, &w); |
225 | if (!(w & 1)) | |
226 | break; | |
227 | cpu_relax(); | |
228 | } | |
229 | } | |
230 | spin_unlock_irqrestore(&gart_lock, flags); | |
231 | if (!flushed) | |
232 | printk("nothing to flush?\n"); | |
233 | } | |
eec1d4fa | 234 | EXPORT_SYMBOL_GPL(amd_flush_garts); |
a32073bf | 235 | |
eec1d4fa | 236 | static __init int init_amd_nbs(void) |
0e152cd7 BP |
237 | { |
238 | int err = 0; | |
239 | ||
9653a5c7 | 240 | err = amd_cache_northbridges(); |
0e152cd7 BP |
241 | |
242 | if (err < 0) | |
eec1d4fa | 243 | printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); |
0e152cd7 | 244 | |
9653a5c7 HR |
245 | if (amd_cache_gart() < 0) |
246 | printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, " | |
247 | "GART support disabled.\n"); | |
248 | ||
0e152cd7 BP |
249 | return err; |
250 | } | |
251 | ||
252 | /* This has to go after the PCI subsystem */ | |
eec1d4fa | 253 | fs_initcall(init_amd_nbs); |