arch/x86/kernel/cpu/intel_cacheinfo.c
/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

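/*
 * Bit-field layout of the registers returned by CPUID leaf 4
 * ("deterministic cache parameters"), decoded by the unions below.
 * Per Intel's documentation: EAX carries type, level and sharing
 * topology; EBX carries line size, partitions and ways (each stored
 * as value - 1); ECX carries the number of sets - 1.
 */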
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
};

#ifdef CONFIG_PCI
static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
	{}
};
#endif

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that are currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

/* Decode AMD's 4-bit associativity encoding (CPUID 0x80000006)
   into an actual way count. */
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
{
	/* Only the L3 leaf (index 3) supports cache index disable */
	if (index < 3)
		return;
	this_leaf->can_disable = 1;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
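	/*
	 * Leaf 4 encodes each geometry field as (value - 1), hence the +1s
	 * below.  Illustrative example (made-up values, not a specific CPU):
	 * 4096 sets * 64-byte lines * 1 partition * 16 ways
	 * = 4194304 bytes, i.e. a 4 MB cache.
	 */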
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
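	/*
	 * Legacy cpuid(2) reporting: the four returned registers are packed
	 * with one-byte cache/TLB descriptors.  The low byte of EAX gives
	 * the number of times the leaf must be queried, a register whose
	 * bit 31 is set carries no valid descriptors, and each remaining
	 * byte is looked up in cache_table[] above.
	 */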
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}
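
/*
 * Illustrative example (hypothetical APIC IDs): if a leaf reports
 * num_threads_sharing = 2, then index_msb = 1, and CPUs whose APIC IDs
 * differ only in the low bit (e.g. 0x10 and 0x11) collapse to the same
 * value after the shift, so they are marked as sharing this cache.
 */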
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info *this_leaf;
	unsigned long j;
	int retval;
	cpumask_t oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

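	/*
	 * cpuid_count() must execute on the CPU being probed, so temporarily
	 * bind the current task to it and restore the old mask afterwards.
	 */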
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed_ptr(current, &oldmask);

out:
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))

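/*
 * The attributes below surface each cache leaf as
 * /sys/devices/system/cpu/cpuX/cache/indexY/{level,type,size,...};
 * e.g. (illustrative output, values vary by machine):
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/size
 *	4096K
 */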
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		cpumask_t *mask = &this_leaf->shared_cpu_map;

		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
	struct pci_dev *dev = NULL;
	int i;

	/* Walk the PCI device list; the (node+1)-th matching K8
	   northbridge function belongs to this node. */
	for (i = 0; i <= node; i++) {
		do {
			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
			if (!dev)
				break;
		} while (!pci_match_id(&k8_nb_id[0], dev));
		if (!dev)
			break;
	}
	return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
	return NULL;
}
#endif

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;
	ssize_t ret = 0;
	int i;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	for (i = 0; i < 2; i++) {
		unsigned int reg;

		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
		ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
			buf,
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
			buf, (reg & 0x30000) >> 16, reg & 0xfff);
	}
	return ret;
}

static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
		    size_t count)
{
	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)
		return 0;

	if (strlen(buf) > 15)
		return -EINVAL;

	ret = sscanf(buf, "%x %x", &index, &val);
	if (ret != 2)
		return -EINVAL;
	if (index > 1)
		return -EINVAL;

	val |= 0xc0000000;
	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

	return 1;
}

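/*
 * Illustrative usage (root, AMD family 0x10; values are hardware-specific
 * and the exact sysfs path depends on the CPU and cache index):
 *	# echo "0 <hex mask>" > .../cache/index3/cache_disable
 * The store above parses the entry index and value as two hex numbers.
 */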
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable.attr,
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			/* Unwind the kobjects registered so far */
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif