/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */
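
/*
 * Summary of how the three methods map onto this file:
 *
 *   PDC_TYPE_SNAKE      -> snake_inventory()      + pagezero_memconfig()
 *   PDC_TYPE_SYSTEM_MAP -> system_map_inventory() + sprockets_memconfig()
 *   PDC_TYPE_PAT        -> pat_inventory()        + pat_memconfig()
 *
 * setup_pdc() works out which flavour of PDC firmware we are talking to,
 * and do_device_inventory()/do_memory_inventory() dispatch on pdc_type.
 */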

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
** Debug options
**	DEBUG_PAT	Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type = PDC_TYPE_ILLEGAL;

void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef __LP64__
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		printk("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box.  All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef __LP64__
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_PAT;
		printk("64 bit PAT.\n");
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test. */

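	/*
	 * The shift below presumably works because the low 4 bits of the
	 * hversion word hold the hardware revision and the next 12 bits
	 * hold the hversion model number; ">> (4 + 7)" then extracts the
	 * top 5 bits of that model field as the bus ID.
	 */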
	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:	/* 720, 730, 750, 735, 755 */
	case 0x6:	/* 705, 710 */
	case 0x7:	/* 715, 725 */
	case 0x8:	/* 745, 747, 742 */
	case 0xA:	/* 712 and similar */
	case 0xC:	/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		printk("Snake.\n");
		return;

	default:	/* Everything else */

		printk("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
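/*
 * Worked example: PDC always counts memory in 4 KB pages.  With 4 KB
 * kernel pages (PAGE_SHIFT == 12) the shift is 0 and pages4k is used
 * as-is; with 16 KB kernel pages (PAGE_SHIFT == 14) the shift is 2, so
 * a PDC count of 8 four-kilobyte pages becomes 8 >> 2 == 2 kernel pages
 * in set_pmem_entry() below.
 */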

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if ( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 Gb, which report that memory using 64 bit only fields
	 * on page zero.  It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}

#ifdef __LP64__

/* All of the PDC PAT specific code is 64-bit only */

/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs.  This view will cause an invalid
** argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, &pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		return status;
	}

	temp = pa_pdc_cell.cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
	if (!dev) {
		return PDC_NE_MOD;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell.mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell.mod_location;

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
		       pa_pdc_cell.mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
		       "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
		       pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
		       pa_pdc_cell.mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
		for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
			printk(KERN_DEBUG
			       " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
			       i, pa_pdc_cell.mod[2 + i * 3],	/* type */
			       pa_pdc_cell.mod[3 + i * 3],	/* start */
			       pa_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
			       " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
			       i, io_pdc_cell.mod[2 + i * 3],	/* type */
			       io_pdc_cell.mod[3 + i * 3],	/* start */
			       io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */
	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, I/O, etc) but we only care about
 * the usable physical ram right now.  Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
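/*
 * Note: pat_memconfig() below keeps its table of PAT_MAX_RANGES + 1
 * struct pdc_pat_pd_addr_map_entry entries on the stack, so this
 * constant is bounded by what the kernel stack can comfortably hold.
 */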

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about.  Notice we said
	 * "may" above.  We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
		        && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
		        && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note: Prelude (and its successors: Lclass, A400/500) only
	** implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				   (unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B, C, J class).  Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else /* !__LP64__ */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif /* !__LP64__ */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		     struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
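 *
 * For example, probing top-level module 5 uses the hardware path
 * { bc: ff ff ff ff ff ff, mod: 5 }; if that module turns out to be a
 * bus adapter, its children are probed with
 * { bc: ff ff ff ff 05 00, mod: func } for func = 0..15.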
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __FUNCTION__);
		return;
	}

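	/*
	 * Additional addresses are queried with 1-based indices; the
	 * module's primary address was already returned by the
	 * pdc_system_map_find_mods() call in system_map_inventory(),
	 * which is presumably why the loop below starts at 1.
	 */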
	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	/*
	 * first stop the usb controller, otherwise the machine
	 * might crash during iommu setup
	 */
	pdc_suspend_usb();

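	/*
	 * Walk successive module indices until firmware reports either
	 * that there are no more modules (PDC_NE_MOD) or that it does
	 * not implement the call at all (PDC_BAD_PROC).
	 */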
	for (i = 0; status != PDC_BAD_PROC && status != PDC_NE_MOD; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
						  &module_path, i);
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

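	/*
	 * Sanity check: we expect the first memory range to start at
	 * PFN 0.  If firmware returned nothing, or a first range that
	 * starts elsewhere, warn and fall back to the simple PAGE0
	 * description of memory.
	 */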
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}

	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}