[PATCH] cpu hotplug: make cpu_notifier related notifier blocks __cpuinit only
[linux-2.6-block.git] arch/ia64/kernel/palinfo.c
/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(char*);

typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;


/*
 * A bunch of string arrays for pretty printing
 */

static char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then 2^n in clear text is generated. The adjustment
 * to the right unit is also done.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitvector_process(char *p, u64 vector)
{
	int i,j;
	const char *units[]={ "", "K", "M", "G", "T" };

	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1) {
			p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
		}
		vector >>= 1;
	}
	return p;
}
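
/*
 * Example (derived from the loop above): a page-size vector with bits
 * 12, 16 and 24 set is rendered as "4K 64K 16M ", since bit n maps to
 * 1 << (n % 10) scaled by the unit ("", K, M, G, T) selected by n / 10.
 */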

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitregister_process(char *p, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin <= i - 2)
				p += sprintf(p, "%d-%d ", begin, i-1);
			else
				p += sprintf(p, "%d ", i-1);
			skip  = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < 127)
			p += sprintf(p, "%d-127", begin);
		else
			p += sprintf(p, "127");
	}

	return p;
}
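
/*
 * Example: with max = 128 and reg_info[] = { 0xffffffff, 0 }, only bits
 * 0-31 are set and the output is "0-31 "; a fully populated pair of
 * words (all 128 bits set) prints "0-127".
 */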

static int
power_info(char *page)
{
	s64 status;
	char *p = page;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			p += sprintf(p, "Power level %d:\n"
				     "\tentry_latency : %d cycles\n"
				     "\texit_latency : %d cycles\n"
				     "\tpower consumption : %d mW\n"
				     "\tCache+TLB coherency : %s\n", i,
				     halt_info[i].pal_power_mgmt_info_s.entry_latency,
				     halt_info[i].pal_power_mgmt_info_s.exit_latency,
				     halt_info[i].pal_power_mgmt_info_s.power_consumption,
				     halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			p += sprintf(p,"Power level %d: not implemented\n",i);
		}
	}
	return p - page;
}

static int
cache_info(char *page)
{
	char *p = page;
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	s64 status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	p += sprintf(p, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches);

	for (i=0; i < levels; i++) {

		for (j=2; j >0 ; j--) {

			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
				continue;
			}
			p += sprintf(p,
				     "%s Cache level %lu:\n"
				     "\tSize : %u bytes\n"
				     "\tAttributes : ",
				     cache_types[j+cci.pcci_unified], i+1,
				     cci.pcci_cache_size);

			if (cci.pcci_unified) p += sprintf(p, "Unified ");

			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			p += sprintf(p,
				     "\tAssociativity : %d\n"
				     "\tLine size : %d bytes\n"
				     "\tStride : %d bytes\n",
				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
			if (j == 1)
				p += sprintf(p, "\tStore latency : N/A\n");
			else
				p += sprintf(p, "\tStore latency : %d cycle(s)\n",
					     cci.pcci_st_latency);

			p += sprintf(p,
				     "\tLoad latency : %d cycle(s)\n"
				     "\tStore hints : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					p += sprintf(p, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			p += sprintf(p, "\n\tLoad hints : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					p += sprintf(p, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			p += sprintf(p,
				     "\n\tAlias boundary : %d byte(s)\n"
				     "\tTag LSB : %d\n"
				     "\tTag MSB : %d\n",
				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				     cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified) break;
		}
	}
	return p - page;
}

static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	s64 status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		p += sprintf(p,
			     "Physical Address Space : %d bits\n"
			     "Virtual Address Space : %d bits\n"
			     "Protection Key Registers(PKR) : %d\n"
			     "Implemented bits in PKR.key : %d\n"
			     "Hash Tag ID : 0x%x\n"
			     "Size of RR.rid : %d\n",
			     vm_info_1.pal_vm_info_1_s.phys_add_size,
			     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
			     vm_info_1.pal_vm_info_1_s.max_pkr+1,
			     vm_info_1.pal_vm_info_1_s.key_size,
			     vm_info_1.pal_vm_info_1_s.hash_tag_id,
			     vm_info_2.pal_vm_info_2_s.rid_size);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		p += sprintf(p, "Supported memory attributes : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		p += sprintf(p, "\n");
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		p += sprintf(p,
			     "\nTLB walker : %simplemented\n"
			     "Number of DTR : %d\n"
			     "Number of ITR : %d\n"
			     "TLB insertable page sizes : ",
			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);


		p = bitvector_process(p, tr_pages);

		p += sprintf(p, "\nTLB purgeable page sizes : ");

		p = bitvector_process(p, vw_pages);
	}
	if ((status=ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		p += sprintf(p,
			     "\nPurge base address : 0x%016lx\n"
			     "Purge outer loop count : %d\n"
			     "Purge inner loop count : %d\n"
			     "Purge outer loop stride : %d\n"
			     "Purge inner loop stride : %d\n",
			     ptce.base, ptce.count[0], ptce.count[1],
			     ptce.stride[0], ptce.stride[1]);

		p += sprintf(p,
			     "TC Levels : %d\n"
			     "Unique TC(s) : %d\n",
			     vm_info_1.pal_vm_info_1_s.num_tc_levels,
			     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */


				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
					continue;
				}

				p += sprintf(p,
					     "\n%s Translation Cache Level %d:\n"
					     "\tHash sets : %d\n"
					     "\tAssociativity : %d\n"
					     "\tNumber of entries : %d\n"
					     "\tFlags : ",
					     cache_types[j+tc_info.tc_unified], i+1,
					     tc_info.tc_num_sets,
					     tc_info.tc_associativity,
					     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					p += sprintf(p, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					p += sprintf(p, "Unified ");
				if (tc_info.tc_reduce_tr)
					p += sprintf(p, "TCReduction");

				p += sprintf(p, "\n\tSupported page sizes: ");

				p = bitvector_process(p, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}

static int
register_info(char *page)
{
	char *p = page;
	u64 reg_info[2];
	u64 info;
	u64 phys_stacked;
	pal_hints_u_t hints;
	u64 iregs, dregs;
	char *info_type[]={
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {

		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;

		p += sprintf(p, "%-32s : ", info_type[info]);

		p = bitregister_process(p, reg_info, 128);

		p += sprintf(p, "\n");
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {

		p += sprintf(p,
			     "RSE stacked physical registers : %ld\n"
			     "RSE load/store hints : %ld (%s)\n",
			     phys_stacked, hints.ph_data,
			     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
	}
	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	p += sprintf(p,
		     "Instruction debug register pairs : %ld\n"
		     "Data debug register pairs : %ld\n", iregs, dregs);

	return p - page;
}

static const char *proc_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};


static int
processor_info(char *page)
{
	char *p = page;
	const char **v = proc_features;
	u64 avail=1, status=1, control=1;
	int i;
	s64 ret;

	if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;

	for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-40s : %s%s %s\n", *v,
			     avail & 0x1 ? "" : "NotImpl",
			     avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
			     avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static const char *bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int
bus_info(char *page)
{
	char *p = page;
	const char **v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;

	avail = av.pal_bus_features_val;
	status = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-48s : %s%s %s\n", *v,
			     avail & 0x1 ? "" : "NotImpl",
			     avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
			     avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static int
version_info(char *page)
{
	pal_version_u_t min_ver, cur_ver;
	char *p = page;

	/* The PAL_VERSION call is advertised as being able to support
	 * both physical and virtual mode calls. This seems to be a documentation
	 * bug rather than a firmware bug. In fact, it only supports physical mode.
	 * So now the code reflects this fact and pal_version() has been updated
	 * accordingly.
	 */
	if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;

	p += sprintf(p,
		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
		     "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
		     "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
		     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,

		     cur_ver.pal_version_s.pv_pal_a_model>>4,
		     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
		     min_ver.pal_version_s.pv_pal_a_model>>4,
		     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,

		     cur_ver.pal_version_s.pv_pal_b_model>>4,
		     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
		     min_ver.pal_version_s.pv_pal_b_model>>4,
		     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
	return p - page;
}

static int
perfmon_info(char *page)
{
	char *p = page;
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;

	p += sprintf(p,
		     "PMC/PMD pairs : %d\n"
		     "Counter width : %d bits\n"
		     "Cycle event number : %d\n"
		     "Retired event number : %d\n"
		     "Implemented PMC : ",
		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);

	p = bitregister_process(p, pm_buffer, 256);
	p += sprintf(p, "\nImplemented PMD : ");
	p = bitregister_process(p, pm_buffer+4, 256);
	p += sprintf(p, "\nCycles count capable : ");
	p = bitregister_process(p, pm_buffer+8, 256);
	p += sprintf(p, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
#endif

	p = bitregister_process(p, pm_buffer+12, 256);

	p += sprintf(p, "\n");

	return p - page;
}

static int
frequency_info(char *page)
{
	char *p = page;
	struct pal_freq_ratio proc, itc, bus;
	u64 base;

	if (ia64_pal_freq_base(&base) == -1)
		p += sprintf(p, "Output clock : not implemented\n");
	else
		p += sprintf(p, "Output clock : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	p += sprintf(p,
		     "Processor/Clock ratio : %d/%d\n"
		     "Bus/Clock ratio : %d/%d\n"
		     "ITC/Clock ratio : %d/%d\n",
		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);

	return p - page;
}

static int
tr_info(char *page)
{
	char *p = page;
	s64 status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	u64 i, j;
	u64 max[3], pgm;
	struct ifa_reg {
		u64 valid:1;
		u64 ig:11;
		u64 vpn:52;
	} *ifa_reg;
	struct itir_reg {
		u64 rv1:2;
		u64 ps:6;
		u64 key:24;
		u64 rv2:32;
	} *itir_reg;
	struct gr_reg {
		u64 p:1;
		u64 rv1:1;
		u64 ma:3;
		u64 a:1;
		u64 d:1;
		u64 pl:2;
		u64 ar:3;
		u64 ppn:38;
		u64 rv2:2;
		u64 ed:1;
		u64 ig:11;
	} *gr_reg;
	struct rid_reg {
		u64 ig1:1;
		u64 rv1:1;
		u64 ig2:6;
		u64 rid:24;
		u64 rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

			status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
			if (status != 0) {
				printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
				       i, j, status);
				continue;
			}

			ifa_reg = (struct ifa_reg *)&tr_buffer[2];

			if (ifa_reg->valid == 0) continue;

			gr_reg = (struct gr_reg *)tr_buffer;
			itir_reg = (struct itir_reg *)&tr_buffer[1];
			rid_reg = (struct rid_reg *)&tr_buffer[3];

			pgm = -1 << (itir_reg->ps - 12);
			p += sprintf(p,
				     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
				     "\tppn : 0x%lx\n"
				     "\tvpn : 0x%lx\n"
				     "\tps : ",
				     "ID"[i], j,
				     tr_valid.pal_tr_valid_s.access_rights_valid,
				     tr_valid.pal_tr_valid_s.priv_level_valid,
				     tr_valid.pal_tr_valid_s.dirty_bit_valid,
				     tr_valid.pal_tr_valid_s.mem_attr_valid,
				     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

			p = bitvector_process(p, 1<< itir_reg->ps);

			p += sprintf(p,
				     "\n\tpl : %d\n"
				     "\tar : %d\n"
				     "\trid : %x\n"
				     "\tp : %d\n"
				     "\tma : %d\n"
				     "\td : %d\n",
				     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
				     gr_reg->d);
		}
	}
	return p - page;
}


/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static palinfo_entry_t palinfo_entries[]={
	{ "version_info", version_info, },
	{ "vm_info", vm_info, },
	{ "cache_info", cache_info, },
	{ "power_info", power_info, },
	{ "register_info", register_info, },
	{ "processor_info", processor_info, },
	{ "perfmon_info", perfmon_info, },
	{ "frequency_info", frequency_info, },
	{ "bus_info", bus_info },
	{ "tr_info", tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

/*
 * This array is used to keep track of the proc entries we create. This is
 * needed when the driver is built as a module and all entries must be
 * removed on unload: the procfs code does not delete entries recursively.
 *
 * Notes:
 *	- +1 accounts for the cpuN directory entry in /proc/pal
 */
#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1))
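/*
 * For example, with the 10 entries in palinfo_entries[] above, each CPU
 * consumes 11 slots (its cpuN directory plus 10 files), so a kernel built
 * with NR_CPUS=4 reserves 44 proc_dir_entry pointers here.
 */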

static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which cpu/function is being requested.
 * It must fit in a 64-bit quantity to be passed to the proc callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned	req_cpu: 32;	/* for which CPU this info is */
		unsigned	func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id
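
/*
 * For instance, create_palinfo_proc_entries() below fills in f.req_cpu and
 * f.func_id and hands f.value to create_proc_read_entry() as the opaque
 * 'data' cookie; palinfo_read_entry() then casts that pointer-sized value
 * back to a pal_func_cpu_u_t to recover both fields.
 */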

#ifdef CONFIG_SMP

/*
 * used to hold information about final function to call
 */
typedef struct {
	palinfo_func_t	func;	/* pointer to function to call */
	char		*page;	/* buffer to store results */
	int		ret;	/* return value from call */
} palinfo_smp_data_t;


/*
 * this function does the actual final call and is called
 * from the smp code, i.e., this is the palinfo callback routine
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	if (data == NULL) {
		printk(KERN_ERR "palinfo: data pointer is NULL\n");
		return;	/* no output, and do not touch data->ret */
	}
	/* does the actual call */
	data->ret = (*data->func)(data->page);
}

/*
 * Function called to trigger the IPI: we need to access a remote CPU.
 * Return:
 *	0 : error or nothing to output
 *	otherwise how many bytes in the "page" buffer were written
 */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.page = page;
	ptr.ret = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int
palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len=0;
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		len = (*palinfo_entries[f->func_id].proc_read)(page);
	else
		len = palinfo_handle_smp(f, page);

	put_cpu();

	if (len <= off+count) *eof = 1;

	*start = page + off;
	len -= off;

	if (len>count) len = count;
	if (len<0) len = 0;

	return len;
}
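
/*
 * Note: this follows the classic read_proc convention: the handler
 * regenerates the full text into 'page' on every call, sets *start and
 * *eof, and returns at most 'count' bytes starting at offset 'off'.
 * The proc_read functions above assume their output fits in the single
 * page supplied by procfs.
 */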

static void
create_palinfo_proc_entries(unsigned int cpu)
{
#	define CPUSTR	"cpu%d"

	pal_func_cpu_u_t f;
	struct proc_dir_entry **pdir;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[sizeof(CPUSTR)];


	/*
	 * we keep track of created entries in a depth-first order for
	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
	 */
	sprintf(cpustr,CPUSTR, cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);

	f.req_cpu = cpu;

	/*
	 * Compute the location to store per-cpu entries.
	 * We don't store the top-level entry in this list; it is removed
	 * last, after all cpu entries have been removed.
	 */
	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
	*pdir++ = cpu_dir;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		*pdir = create_proc_read_entry(
				palinfo_entries[j].name, 0, cpu_dir,
				palinfo_read_entry, (void *)f.value);
		if (*pdir)
			(*pdir)->owner = THIS_MODULE;
		pdir++;
	}
}

static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
	int j;
	struct proc_dir_entry *cpu_dir, **pdir;

	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
	cpu_dir = *pdir;
	*pdir++ = NULL;
	for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
		if ((*pdir)) {
			remove_proc_entry ((*pdir)->name, cpu_dir);
			*pdir++ = NULL;
		}
	}

	if (cpu_dir) {
		remove_proc_entry(cpu_dir->name, palinfo_dir);
	}
}

static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
					  unsigned long action,
					  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		create_palinfo_proc_entries(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		remove_palinfo_proc_entries(hotcpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};
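
/*
 * With __cpuinit/__cpuinitdata the callback and the notifier block can be
 * discarded after boot on kernels built without CONFIG_HOTPLUG_CPU, which
 * is the point of the annotations introduced by the patch in the subject
 * line.
 */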

static int __init
palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {
		create_palinfo_proc_entries(i);
	}

	/* Register for future delivery via notify registration */
	register_cpu_notifier(&palinfo_cpu_notifier);

	return 0;
}
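
/*
 * Usage example: once the module is loaded, each online CPU gets a
 * directory such as /proc/pal/cpu0 with one file per palinfo_entries[]
 * row, e.g. "cat /proc/pal/cpu0/cache_info".
 */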

static void __exit
palinfo_exit(void)
{
	int i = 0;

	/* remove all nodes: depth first pass. Could optimize this */
	for_each_online_cpu(i) {
		remove_palinfo_proc_entries(i);
	}

	/*
	 * Remove the top level entry finally
	 */
	remove_proc_entry(palinfo_dir->name, NULL);

	/*
	 * Unregister from cpu notifier callbacks
	 */
	unregister_cpu_notifier(&palinfo_cpu_notifier);
}

module_init(palinfo_init);
module_exit(palinfo_exit);