/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on the specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(char*);

typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;


/*
 *  A bunch of string arrays for pretty printing
 */

static char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then 2^n in clear text is generated. The adjustment
 * to the right unit is also done.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitvector_process(char *p, u64 vector)
{
	int i,j;
	const char *units[]={ "", "K", "M", "G", "T" };

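	/*
	 * i/10 selects the unit (bytes, K, M, G, T) and each set bit i
	 * prints as 2^(i mod 10) in that unit, e.g. bit 14 prints as "16K".
	 */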
	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1) {
			p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
		}
		vector >>= 1;
	}
	return p;
}

/*
 * Takes a bit vector (one or more 64-bit words) and produces a string
 * such that if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a pointer to the bit vector
 *	- the number of bits to scan
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitregister_process(char *p, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

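	/* skip straight to the first implemented register (ffs() is 1-based) */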
	value >>= i = begin = ffs(value) - 1;

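	/*
	 * Coalesce consecutive set bits into "begin-end" ranges; 'skip' is
	 * set while scanning across a run of clear (unimplemented) bits.
	 */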
	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin  <= i - 2)
				p += sprintf(p, "%d-%d ", begin, i-1);
			else
				p += sprintf(p, "%d ", i-1);
			skip  = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < 127)
			p += sprintf(p, "%d-127", begin);
		else
			p += sprintf(p, "127");
	}

	return p;
}

static int
power_info(char *page)
{
	s64 status;
	char *p = page;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

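	/* the buffer holds one pal_power_mgmt_info descriptor per power level (8 at most) */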
	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			p += sprintf(p,	"Power level %d:\n"
				     "\tentry_latency       : %d cycles\n"
				     "\texit_latency        : %d cycles\n"
				     "\tpower consumption   : %d mW\n"
				     "\tCache+TLB coherency : %s\n", i,
				     halt_info[i].pal_power_mgmt_info_s.entry_latency,
				     halt_info[i].pal_power_mgmt_info_s.exit_latency,
				     halt_info[i].pal_power_mgmt_info_s.power_consumption,
				     halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			p += sprintf(p,"Power level %d: not implemented\n",i);
		}
	}
	return p - page;
}

static int
cache_info(char *page)
{
	char *p = page;
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	s64 status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	p += sprintf(p, "Cache levels  : %ld\nUnique caches : %ld\n\n", levels, unique_caches);

	for (i=0; i < levels; i++) {

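		/* j=2 queries the data/unified cache, j=1 the instruction cache */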
		for (j=2; j >0 ; j--) {

			/* even without unification some levels may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
				continue;
			}
			p += sprintf(p,
				     "%s Cache level %lu:\n"
				     "\tSize           : %lu bytes\n"
				     "\tAttributes     : ",
				     cache_types[j+cci.pcci_unified], i+1,
				     cci.pcci_cache_size);

			if (cci.pcci_unified) p += sprintf(p, "Unified ");

			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			p += sprintf(p,
				     "\tAssociativity  : %d\n"
				     "\tLine size      : %d bytes\n"
				     "\tStride         : %d bytes\n",
				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
			if (j == 1)
				p += sprintf(p, "\tStore latency  : N/A\n");
			else
				p += sprintf(p, "\tStore latency  : %d cycle(s)\n",
						cci.pcci_st_latency);

			p += sprintf(p,
				     "\tLoad latency   : %d cycle(s)\n"
				     "\tStore hints    : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					p += sprintf(p, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			p += sprintf(p, "\n\tLoad hints     : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					p += sprintf(p, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			p += sprintf(p,
				     "\n\tAlias boundary : %d byte(s)\n"
				     "\tTag LSB        : %d\n"
				     "\tTag MSB        : %d\n",
				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				     cci.pcci_tag_msb);

			/* when unified, data (j=2) is enough */
			if (cci.pcci_unified) break;
		}
	}
	return p - page;
}


static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	s64 status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}


	p += sprintf(p,
		     "Physical Address Space         : %d bits\n"
		     "Virtual Address Space          : %d bits\n"
		     "Protection Key Registers(PKR)  : %d\n"
		     "Implemented bits in PKR.key    : %d\n"
		     "Hash Tag ID                    : 0x%x\n"
		     "Size of RR.rid                 : %d\n",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);

	if (ia64_pal_mem_attrib(&attrib) != 0)
		return 0;

	p += sprintf(p, "Supported memory attributes    : ");
	sep = "";
	for (i = 0; i < 8; i++) {
		if (attrib & (1 << i)) {
			p += sprintf(p, "%s%s", sep, mem_attrib[i]);
			sep = ", ";
		}
	}
	p += sprintf(p, "\n");

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
		return 0;
	}

	p += sprintf(p,
		     "\nTLB walker                     : %simplemented\n"
		     "Number of DTR                  : %d\n"
		     "Number of ITR                  : %d\n"
		     "TLB insertable page sizes      : ",
		     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
		     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
		     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);


	p = bitvector_process(p, tr_pages);

	p += sprintf(p, "\nTLB purgeable page sizes       : ");

	p = bitvector_process(p, vw_pages);

	if ((status=ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
		return 0;
	}

	p += sprintf(p,
		     "\nPurge base address             : 0x%016lx\n"
		     "Purge outer loop count         : %d\n"
		     "Purge inner loop count         : %d\n"
		     "Purge outer loop stride        : %d\n"
		     "Purge inner loop stride        : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);

	p += sprintf(p,
		     "TC Levels                      : %d\n"
		     "Unique TC(s)                   : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

	for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
		for (j=2; j>0 ; j--) {
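			/* j=2: data/unified translation cache, j=1: instruction translation cache */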
			tc_pages = 0; /* just in case */


			/* even without unification, some levels may not be present */
			if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
				continue;
			}

			p += sprintf(p,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets           : %d\n"
				     "\tAssociativity       : %d\n"
				     "\tNumber of entries   : %d\n"
				     "\tFlags               : ",
				     cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
				     tc_info.tc_associativity, tc_info.tc_num_entries);

			if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
			if (tc_info.tc_unified) p += sprintf(p, "Unified ");
			if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");

			p += sprintf(p, "\n\tSupported page sizes: ");

			p = bitvector_process(p, tc_pages);

			/* when unified, data (j=2) is enough */
			if (tc_info.tc_unified) break;
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}


static int
register_info(char *page)
{
	char *p = page;
	u64 reg_info[2];
	u64 info;
	u64 phys_stacked;
	pal_hints_u_t hints;
	u64 iregs, dregs;
	char *info_type[]={
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {

		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;

		p += sprintf(p, "%-32s : ", info_type[info]);

		p = bitregister_process(p, reg_info, 128);

		p += sprintf(p, "\n");
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;

	p += sprintf(p,
		     "RSE stacked physical registers   : %ld\n"
		     "RSE load/store hints             : %ld (%s)\n",
		     phys_stacked, hints.ph_data,
		     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(\?\?)");

	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	p += sprintf(p,
		     "Instruction debug register pairs : %ld\n"
		     "Data debug register pairs        : %ld\n", iregs, dregs);

	return p - page;
}

static const char *proc_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};


static int
processor_info(char *page)
{
	char *p = page;
	const char **v = proc_features;
	u64 avail=1, status=1, control=1;
	int i;
	s64 ret;

	if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;

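	/*
	 * Walk the 64 feature bits: for each named feature report whether it
	 * is implemented, currently on or off, and whether it is controllable.
	 */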
	for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-40s : %s%s %s\n", *v,
				avail & 0x1 ? "" : "NotImpl",
				avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
				avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static const char *bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request  Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int
bus_info(char *page)
{
	char *p = page;
	const char **v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;

	avail   = av.pal_bus_features_val;
	status  = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-48s : %s%s %s\n", *v,
				avail & 0x1 ? "" : "NotImpl",
				avail & 0x1 ? (status  & 0x1 ? "On" : "Off"): "",
				avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static int
version_info(char *page)
{
	pal_version_u_t min_ver, cur_ver;
	char *p = page;

	/* The PAL_VERSION call is advertised as being able to support
	 * both physical and virtual mode calls. This seems to be a documentation
	 * bug rather than a firmware bug. In fact, it only supports physical mode.
	 * So now the code reflects this fact and pal_version() has been updated
	 * accordingly.
	 */
	if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;

	p += sprintf(p,
		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
		     "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n"
		     "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
		     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,

		     cur_ver.pal_version_s.pv_pal_a_model>>4,
		     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
		     min_ver.pal_version_s.pv_pal_a_model>>4,
		     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,

		     cur_ver.pal_version_s.pv_pal_b_model>>4,
		     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
		     min_ver.pal_version_s.pv_pal_b_model>>4,
		     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
	return p - page;
}

static int
perfmon_info(char *page)
{
	char *p = page;
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

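	/*
	 * pm_buffer is filled with four 256-bit masks (4 u64s each):
	 * implemented PMCs, implemented PMDs, cycle-count-capable counters
	 * and retired-bundle-count-capable counters, in that order.
	 */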
	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;

	p += sprintf(p,
		     "PMC/PMD pairs                 : %d\n"
		     "Counter width                 : %d bits\n"
		     "Cycle event number            : %d\n"
		     "Retired event number          : %d\n"
		     "Implemented PMC               : ",
		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);

	p = bitregister_process(p, pm_buffer, 256);
	p += sprintf(p, "\nImplemented PMD               : ");
	p = bitregister_process(p, pm_buffer+4, 256);
	p += sprintf(p, "\nCycles count capable          : ");
	p = bitregister_process(p, pm_buffer+8, 256);
	p += sprintf(p, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
#endif

	p = bitregister_process(p, pm_buffer+12, 256);

	p += sprintf(p, "\n");

	return p - page;
}

static int
frequency_info(char *page)
{
	char *p = page;
	struct pal_freq_ratio proc, itc, bus;
	u64 base;

	if (ia64_pal_freq_base(&base) == -1)
		p += sprintf(p, "Output clock            : not implemented\n");
	else
		p += sprintf(p, "Output clock            : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	p += sprintf(p,
		     "Processor/Clock ratio   : %ld/%ld\n"
		     "Bus/Clock ratio         : %ld/%ld\n"
		     "ITC/Clock ratio         : %ld/%ld\n",
		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);

	return p - page;
}

static int
tr_info(char *page)
{
	char *p = page;
	s64 status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	u64 i, j;
	u64 max[3], pgm;
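	/*
	 * The four u64s filled in by ia64_pal_tr_read() are decoded through
	 * the overlays below: tr_buffer[0] = translation (GR format),
	 * [1] = ITIR, [2] = IFA, [3] = RID.
	 */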
	struct ifa_reg {
		u64 valid:1;
		u64 ig:11;
		u64 vpn:52;
	} *ifa_reg;
	struct itir_reg {
		u64 rv1:2;
		u64 ps:6;
		u64 key:24;
		u64 rv2:32;
	} *itir_reg;
	struct gr_reg {
		u64 p:1;
		u64 rv1:1;
		u64 ma:3;
		u64 a:1;
		u64 d:1;
		u64 pl:2;
		u64 ar:3;
		u64 ppn:38;
		u64 rv2:2;
		u64 ed:1;
		u64 ig:11;
	} *gr_reg;
	struct rid_reg {
		u64 ig1:1;
		u64 rv1:1;
		u64 ig2:6;
		u64 rid:24;
		u64 rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

		status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
		if (status != 0) {
			printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
			       i, j, status);
			continue;
		}

		ifa_reg  = (struct ifa_reg *)&tr_buffer[2];

		if (ifa_reg->valid == 0) continue;

		gr_reg   = (struct gr_reg *)tr_buffer;
		itir_reg = (struct itir_reg *)&tr_buffer[1];
		rid_reg  = (struct rid_reg *)&tr_buffer[3];

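		/*
		 * ppn and vpn are in 4KB units; pgm clears the low (ps-12)
		 * bits so only the page-size-aligned frame number remains.
		 */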
		pgm	 = -1 << (itir_reg->ps - 12);
		p += sprintf(p,
			     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
			     "\tppn  : 0x%lx\n"
			     "\tvpn  : 0x%lx\n"
			     "\tps   : ",
			     "ID"[i], j,
			     tr_valid.pal_tr_valid_s.access_rights_valid,
			     tr_valid.pal_tr_valid_s.priv_level_valid,
			     tr_valid.pal_tr_valid_s.dirty_bit_valid,
			     tr_valid.pal_tr_valid_s.mem_attr_valid,
			     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

		p = bitvector_process(p, 1<< itir_reg->ps);

		p += sprintf(p,
			     "\n\tpl   : %d\n"
			     "\tar   : %d\n"
			     "\trid  : %x\n"
			     "\tp    : %d\n"
			     "\tma   : %d\n"
			     "\td    : %d\n",
			     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
			     gr_reg->d);
		}
	}
	return p - page;
}



/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

/*
 * This array is used to keep track of the proc entries we create. This is
 * required in module mode when we need to remove all entries. The procfs code
 * does not do recursive deletion.
 *
 * Notes:
 *	- first +1 accounts for the cpuN entry
 *	- second +1 accounts for the toplevel palinfo entry
 *
 */
#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1)+1)

static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];

/*
 * This data structure is used to pass which cpu/function is being requested.
 * It must fit in a 64-bit quantity to be passed to the proc callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned	req_cpu: 32;	/* for which CPU this info is */
		unsigned	func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id

#ifdef CONFIG_SMP

/*
 * used to hold information about final function to call
 */
typedef struct {
	palinfo_func_t	func;	/* pointer to function to call */
	char		*page;	/* buffer to store results */
	int		ret;	/* return value from call */
} palinfo_smp_data_t;


/*
 * This function does the actual final call and is called from the SMP code,
 * i.e., this is the palinfo callback routine.
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	if (data == NULL) {
		printk(KERN_ERR "palinfo: data pointer is NULL\n");
		return;	/* no output */
	}
	/* does the actual call */
	data->ret = (*data->func)(data->page);
}

/*
 * Function called to trigger the IPI when we need to access a remote CPU.
 * Return:
 *	0 : error or nothing to output
 *	otherwise how many bytes in the "page" buffer were written
 */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.page = page;
	ptr.ret  = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int
palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len=0;
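	/*
	 * 'data' is not a pointer here: it carries the packed cpu/function id
	 * stored at create_proc_read_entry() time, so decode it in place.
	 */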
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;

	MOD_INC_USE_COUNT;
	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == smp_processor_id())
		len = (*palinfo_entries[f->func_id].proc_read)(page);
	else
		len = palinfo_handle_smp(f, page);

	if (len <= off+count) *eof = 1;

	*start = page + off;
	len   -= off;

	if (len>count) len = count;
	if (len<0) len = 0;

	MOD_DEC_USE_COUNT;

	return len;
}

static int __init
palinfo_init(void)
{
#	define CPUSTR	"cpu%d"

	pal_func_cpu_u_t f;
	struct proc_dir_entry **pdir = palinfo_proc_entries;
	struct proc_dir_entry *palinfo_dir, *cpu_dir;
	int i, j;
	char cpustr[sizeof(CPUSTR)];

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);

	palinfo_dir = proc_mkdir("pal", NULL);

	/*
	 * we keep track of created entries in a depth-first order for
	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
	 */
	for (i=0; i < NR_CPUS; i++) {

		if (!cpu_online(i)) continue;

		sprintf(cpustr,CPUSTR, i);

		cpu_dir = proc_mkdir(cpustr, palinfo_dir);

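		/*
		 * Pack the cpu number and function index into the 64-bit
		 * cookie handed to procfs as the read callback's 'data'.
		 */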
		f.req_cpu = i;

		for (j=0; j < NR_PALINFO_ENTRIES; j++) {
			f.func_id = j;
			*pdir = create_proc_read_entry(
					palinfo_entries[j].name, 0, cpu_dir,
					palinfo_read_entry, (void *)f.value);
			pdir++;
		}
		*pdir++ = cpu_dir;
	}
	*pdir = palinfo_dir;

	return 0;
}

static void __exit
palinfo_exit(void)
{
	int i = 0;

	/* remove all nodes: depth first pass. Could optimize this  */
	for (i=0; i< NR_PALINFO_PROC_ENTRIES ; i++) {
		if (palinfo_proc_entries[i])
			remove_proc_entry (palinfo_proc_entries[i]->name, NULL);
	}
}

module_init(palinfo_init);
module_exit(palinfo_exit);