1 /*
2  * This file is subject to the terms and conditions of the GNU General
3  * Public License.  See the file "COPYING" in the main directory of this
4  * archive for more details.
5  *
6  * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
7  * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
8  */
9 
10 #include <linux/config.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/sched.h>
14 #include <linux/mmzone.h>	/* for numnodes */
15 #include <linux/mm.h>
16 #include <linux/module.h>
17 
18 #include <asm/cpu.h>
19 #include <asm/pgalloc.h>
20 #include <asm/pgtable.h>
21 #include <asm/sn/types.h>
22 #include <asm/sn/sn0/addrs.h>
23 #include <asm/sn/sn0/hubni.h>
24 #include <asm/sn/sn0/hubio.h>
25 #include <asm/sn/klconfig.h>
26 #include <asm/sn/ioc3.h>
27 #include <asm/mipsregs.h>
28 #include <asm/sn/gda.h>
29 #include <asm/sn/intr.h>
30 #include <asm/current.h>
31 #include <asm/smp.h>
32 #include <asm/processor.h>
33 #include <asm/mmu_context.h>
34 #include <asm/sn/launch.h>
35 #include <asm/sn/sn_private.h>
36 #include <asm/sn/sn0/ip27.h>
37 #include <asm/sn/mapped_kernel.h>
38 #include <asm/sn/sn0/addrs.h>
39 #include <asm/sn/gda.h>
40 
#define CPU_NONE		(cpuid_t)-1

/*
 * The following should work till 64 nodes, ie 128p SN0s.
 */
#define CNODEMASK_CLRALL(p)	(p) = 0
#define CNODEMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
#define CNODEMASK_SETB(p, bit)	((p) |= 1ULL << (bit))

cpumask_t	boot_cpumask;		/* cpus discovered at probe time */
hubreg_t	region_mask = 0;	/* regions containing at least one node */
static int	fine_mode = 0;		/* hub region mode, set in mlreset() */
int		maxcpus;		/* highest probed cpuid + 1 */
static spinlock_t hub_mask_lock = SPIN_LOCK_UNLOCKED;	/* guards hub_init_mask */
static cnodemask_t hub_init_mask;	/* nodes whose hub has been initialized */
static atomic_t numstarted = ATOMIC_INIT(1);	/* started cpus; master counts as the initial 1 */
static int router_distance;		/* scratch bound for node_distance() search */
nasid_t master_nasid = INVALID_NASID;	/* nasid the master cpu booted on */

EXPORT_SYMBOL(master_nasid);

/* Translation tables filled in by cpu_node_probe(). */
cnodeid_t	nasid_to_compact_node[MAX_NASIDS];
nasid_t		compact_to_nasid_node[MAX_COMPACT_NODES];
cnodeid_t	cpuid_to_compact_node[MAXCPUS];
char		node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];

EXPORT_SYMBOL(nasid_to_compact_node);
68 
get_region(cnodeid_t cnode)69 hubreg_t get_region(cnodeid_t cnode)
70 {
71 	if (fine_mode)
72 		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
73 	else
74 		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
75 }
76 
/*
 * Build a bitmask with one bit set for every region that contains at
 * least one of the first maxnodes compact nodes.
 */
static void gen_region_mask(hubreg_t *region_mask, int maxnodes)
{
	hubreg_t mask = 0;
	cnodeid_t node;

	for (node = 0; node < maxnodes; node++)
		mask |= 1ULL << get_region(node);

	*region_mask = mask;
}
86 
is_fine_dirmode(void)87 int is_fine_dirmode(void)
88 {
89 	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
90 		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
91 }
92 
get_actual_nasid(lboard_t * brd)93 nasid_t get_actual_nasid(lboard_t *brd)
94 {
95 	klhub_t *hub;
96 
97 	if (!brd)
98 		return INVALID_NASID;
99 
100 	/* find out if we are a completely disabled brd. */
101 	hub  = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
102 	if (!hub)
103 		return INVALID_NASID;
104 	if (!(hub->hub_info.flags & KLINFO_ENABLE))	/* disabled node brd */
105 		return hub->hub_info.physid;
106 	else
107 		return brd->brd_nasid;
108 }
109 
110 /* Tweak this for maximum number of CPUs to activate */
111 static int max_cpus = NR_CPUS;
112 
do_cpumask(cnodeid_t cnode,nasid_t nasid,cpumask_t * boot_cpumask,int * highest)113 int do_cpumask(cnodeid_t cnode, nasid_t nasid, cpumask_t *boot_cpumask,
114 							int *highest)
115 {
116 	static int tot_cpus_found = 0;
117 	lboard_t *brd;
118 	klcpu_t *acpu;
119 	int cpus_found = 0;
120 	cpuid_t cpuid;
121 
122 	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
123 
124 	do {
125 		acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
126 		while (acpu) {
127 			cpuid = acpu->cpu_info.virtid;
128 			/* cnode is not valid for completely disabled brds */
129 			if (get_actual_nasid(brd) == brd->brd_nasid)
130 				cpuid_to_compact_node[cpuid] = cnode;
131 			if (cpuid > *highest)
132 				*highest = cpuid;
133 			/* Only let it join in if it's marked enabled */
134 			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
135 						(tot_cpus_found != max_cpus)) {
136 				CPUMASK_SETB(*boot_cpumask, cpuid);
137 				cpus_found++;
138 				tot_cpus_found++;
139 			}
140 			acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
141 								KLSTRUCT_CPU);
142 		}
143 		brd = KLCF_NEXT(brd);
144 		if (brd)
145 			brd = find_lboard(brd,KLTYPE_IP27);
146 		else
147 			break;
148 	} while (brd);
149 
150 	return cpus_found;
151 }
152 
cpu_node_probe(cpumask_t * boot_cpumask,int * numnodes)153 cpuid_t cpu_node_probe(cpumask_t *boot_cpumask, int *numnodes)
154 {
155 	int i, cpus = 0, highest = 0;
156 	gda_t *gdap = GDA;
157 	nasid_t nasid;
158 
159 	/*
160 	 * Initialize the arrays to invalid nodeid (-1)
161 	 */
162 	for (i = 0; i < MAX_COMPACT_NODES; i++)
163 		compact_to_nasid_node[i] = INVALID_NASID;
164 	for (i = 0; i < MAX_NASIDS; i++)
165 		nasid_to_compact_node[i] = INVALID_CNODEID;
166 	for (i = 0; i < MAXCPUS; i++)
167 		cpuid_to_compact_node[i] = INVALID_CNODEID;
168 
169 	*numnodes = 0;
170 	for (i = 0; i < MAX_COMPACT_NODES; i++) {
171 		if ((nasid = gdap->g_nasidtable[i]) == INVALID_NASID) {
172 			break;
173 		} else {
174 			compact_to_nasid_node[i] = nasid;
175 			nasid_to_compact_node[nasid] = i;
176 			(*numnodes)++;
177 			cpus += do_cpumask(i, nasid, boot_cpumask, &highest);
178 		}
179 	}
180 
181 	/*
182 	 * Cpus are numbered in order of cnodes. Currently, disabled
183 	 * cpus are not numbered.
184 	 */
185 
186 	return(highest + 1);
187 }
188 
/*
 * A cpu is enabled exactly when probing put it into the boot mask;
 * CPU_NONE is never enabled.
 */
int cpu_enabled(cpuid_t cpu)
{
	if (cpu == CPU_NONE)
		return 0;

	return CPUMASK_TSTB(boot_cpumask, cpu) != 0;
}
195 
/*
 * Early platform setup run once on the master cpu: probe all nodes
 * and cpus, build the topology tables, and program the per-node hub
 * region/calias registers before any secondary cpu is launched.
 */
void mlreset (void)
{
	int i;
	void init_topology_matrix(void);
	void dump_topology(void);

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and
	 * sets up the mapping tables.
	 */
	CPUMASK_CLRALL(boot_cpumask);
	maxcpus = cpu_node_probe(&boot_cpumask, &numnodes);
	printk(KERN_INFO "Discovered %d cpus on %d nodes\n", maxcpus, numnodes);

	init_topology_matrix();
	dump_topology();

	gen_region_mask(&region_mask, numnodes);
	CNODEMASK_CLRALL(hub_init_mask);

	setup_replication_mask(numnodes);

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for (i = 0; i < numnodes; i++) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		/* No replicated exception handlers: disable calias. */
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}
252 
253 
/*
 * Acknowledge every interrupt latched in the given pending register.
 * @nasid:      node the register belongs to (currently unused; the
 *              clear goes through the local hub)
 * @pend:       mapped address of a PI_INT_PEND0/1 register
 * @base_level: interrupt level corresponding to bit 0 of @pend
 * @name:       register name, kept for debugging (currently unused)
 */
void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
							char *name)
{
	volatile hubreg_t bits;
	int i;

	/* Check pending interrupts */
	if ((bits = HUB_L(pend)) != 0)
		for (i = 0; i < N_INTPEND_BITS; i++)
			/*
			 * hubreg_t is 64 bits wide: test with a 64-bit
			 * constant so bits 32..63 are seen and the shift
			 * stays defined for every i.  The previous
			 * "1 << i" was a 32-bit int shift, which is
			 * undefined for i >= 31 and could never match
			 * the upper half of the register.
			 */
			if (bits & (1ULL << i))
				LOCAL_HUB_CLR_INTR(base_level + i);
}
266 
/*
 * Quiesce all hub interrupts on one node: close both A/B masks for
 * both pending registers first, then acknowledge anything already
 * latched in PI_INT_PEND0/1.
 */
void intr_clear_all(nasid_t nasid)
{
	REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND0),
		INT_PEND0_BASELVL, "INT_PEND0");
	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND1),
		INT_PEND1_BASELVL, "INT_PEND1");
}
278 
sn_mp_setup(void)279 void sn_mp_setup(void)
280 {
281 	cnodeid_t	cnode;
282 #if 0
283 	cpuid_t		cpu;
284 #endif
285 
286 	for (cnode = 0; cnode < numnodes; cnode++) {
287 #if 0
288 		init_platform_nodepda();
289 #endif
290 		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
291 	}
292 #if 0
293 	for (cpu = 0; cpu < maxcpus; cpu++) {
294 		init_platform_pda();
295 	}
296 #endif
297 }
298 
/*
 * One-time per-node hub initialization.  The first cpu of a node to
 * arrive here (tracked in hub_init_mask under hub_mask_lock) performs
 * the setup; later cpus on the same node find their bit already set
 * and fall straight through to the unlock.
 */
void per_hub_init(cnodeid_t cnode)
{
	extern void pcibr_setup(cnodeid_t);
	cnodemask_t	done;
	nasid_t		nasid;

	nasid = COMPACT_TO_NASID_NODEID(cnode);

	spin_lock(&hub_mask_lock);
	/* Test our bit. */
	if (!(done = CNODEMASK_TSTB(hub_init_mask, cnode))) {
		/* Turn our bit on in the mask. */
		CNODEMASK_SETB(hub_init_mask, cnode);
		/*
	 	 * Do the actual initialization if it hasn't been done yet.
	 	 * We don't need to hold a lock for this work.
	 	 */
		/*
		 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
		 */
		REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
		REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
		hub_rtc_init(cnode);
		pcibr_setup(cnode);
#ifdef CONFIG_REPLICATE_EXHANDLERS
		/*
		 * If this is not a headless node initialization,
		 * copy over the caliased exception handlers.
		 */
		if (get_compact_nodeid() == cnode) {
			extern char except_vec0, except_vec1_r10k;
			extern char except_vec2_generic, except_vec3_generic;

			/*
			 * NOTE(review): the first two copies to
			 * KSEG0+0x100 and KSEG0+0x180 are overwritten
			 * by the last two memcpys below; they look
			 * redundant - confirm before touching this
			 * sequence.
			 */
			memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic,
								0x80);
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
								0x80);
			memcpy((void *)KSEG0, &except_vec0, 0x80);
			memcpy((void *)KSEG0 + 0x080, &except_vec1_r10k, 0x80);
			memcpy((void *)(KSEG0 + 0x100), (void *) KSEG0, 0x80);
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
								0x100);
			__flush_cache_all();
		}
#endif
	}
	spin_unlock(&hub_mask_lock);
}
347 
348 /*
349  * This is similar to hard_smp_processor_id().
350  */
getcpuid(void)351 cpuid_t getcpuid(void)
352 {
353 	klcpu_t *klcpu;
354 
355 	klcpu = nasid_slice_to_cpuinfo(get_nasid(),LOCAL_HUB_L(PI_CPU_NUM));
356 	return klcpu->cpu_info.virtid;
357 }
358 
/*
 * Per-cpu bring-up executed by every cpu early in its life: TLB/trap
 * setup, interrupt masking, hub init for its node, timers and the NMI
 * handler.  The first caller is the master; all later callers are
 * slaves, which additionally load the MMU and bump numstarted so
 * allowboot() can sequence the next launch.
 */
void per_cpu_init(void)
{
	extern void install_cpu_nmi_handler(int slice);
	extern void load_mmu(void);
	static int is_slave = 0;	/* 0 only for the very first caller */
	int cpu = smp_processor_id();
	cnodeid_t cnode = get_compact_nodeid();

	TLBMISS_HANDLER_SETUP();
#if 0
	intr_init();
#endif
	/* Mask all CP0 interrupt lines until handlers are installed. */
	clear_c0_status(ST0_IM);
	per_hub_init(cnode);
	cpu_time_init();
	if (smp_processor_id())	/* master can't do this early, no kmalloc */
		install_cpuintr(cpu);
	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(cpu));
#if 0
	install_tlbintr(cpu);
#endif
	set_c0_status(SRB_DEV0 | SRB_DEV1);
	if (is_slave) {
		load_mmu();
		atomic_inc(&numstarted);	/* signal the master we are up */
	} else {
		is_slave = 1;	/* everyone after the master is a slave */
	}
}
389 
get_compact_nodeid(void)390 cnodeid_t get_compact_nodeid(void)
391 {
392 	nasid_t nasid;
393 
394 	nasid = get_nasid();
395 	/*
396 	 * Map the physical node id to a virtual node id (virtual node ids
397 	 * are contiguous).
398 	 */
399 	return NASID_TO_COMPACT_NODEID(nasid);
400 }
401 
402 #ifdef CONFIG_SMP
403 
404 /*
405  * Takes as first input the PROM assigned cpu id, and the kernel
406  * assigned cpu id as the second.
407  */
/*
 * Takes as first input the PROM assigned cpu id, and the kernel
 * assigned cpu id as the second.  Records where that kernel cpu
 * number lives (nasid, cnode, slice) and which PROM id it maps to.
 */
static void alloc_cpupda(cpuid_t cpu, int cpunum)
{
	cnodeid_t	node = get_cpu_cnode(cpu);
	nasid_t		nasid = COMPACT_TO_NASID_NODEID(node);

	cputonasid(cpunum) = nasid;
	cputocnode(cpunum) = node;
	cputoslice(cpunum) = get_cpu_slice(cpu);
	cpu_data[cpunum].p_cpuid = cpu;
}
421 
422 static volatile cpumask_t boot_barrier;
423 
424 extern atomic_t cpus_booted;
425 
/*
 * Entry point of a freshly-launched slave cpu: finish its own
 * initialization, announce itself (boot_barrier, cpu_online_map,
 * cpus_booted), then spin until the master releases everyone via
 * smp_commenced and drop into the idle loop.
 */
void __init start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	extern atomic_t smp_commenced;

	CPUMASK_CLRB(boot_barrier, getcpuid());	/* needs atomicity */
	cpu_probe();
	per_cpu_init();
	per_cpu_trap_init();
#if 0
	ecc_init();
	bte_lateinit();
	init_mfhi_war();
#endif
	local_flush_tlb_all();
	__flush_cache_all();

	local_irq_enable();
#if 0
	/*
	 * Get our bogomips.
	 */
        calibrate_delay();
        smp_store_cpu_info(cpuid);
	prom_smp_finish();
#endif
	printk("Slave cpu booted successfully\n");
	CPUMASK_SETB(cpu_online_map, cpu);
	atomic_inc(&cpus_booted);

	/* Busy-wait for the master's go-ahead before idling. */
	while (!atomic_read(&smp_commenced));
	return cpu_idle();
}
459 
/*
 * Create the idle task for a secondary cpu directly with do_fork()
 * instead of kernel_thread(), so the child is never scheduled on
 * this cpu.  Returns do_fork()'s result (negative on failure).
 */
static int __init fork_by_hand(void)
{
	struct pt_regs regs;	/* deliberately uninitialized, see below */
	/*
	 * don't care about the epc and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}
469 
/*
 * Launch every cpu found at probe time.  The master accounts for
 * itself first; for each slave it hand-forks an idle task, points the
 * slave at start_secondary() (via smp_bootstrap), fires it through
 * the PROM LAUNCH_SLAVE interface, and waits on numstarted until the
 * slave has finished per_cpu_init() before launching the next one.
 */
__init void allowboot(void)
{
	int		num_cpus = 0;	/* kernel cpu numbers handed out */
	cpuid_t		cpu, mycpuid = getcpuid();
	cnodeid_t	cnode;

	sn_mp_setup();
	/* Master has already done per_cpu_init() */
	install_cpuintr(smp_processor_id());
#if 0
	bte_lateinit();
	ecc_init();
#endif

	replicate_kernel_text(numnodes);
	boot_barrier = boot_cpumask;
	/* Launch slaves. */
	for (cpu = 0; cpu < maxcpus; cpu++) {
		struct task_struct *idle;

		if (cpu == mycpuid) {
			alloc_cpupda(cpu, num_cpus);
			num_cpus++;
			/* We're already started, clear our bit */
			CPUMASK_SETB(cpu_online_map, cpu);
			CPUMASK_CLRB(boot_barrier, cpu);
			continue;
		}

		/* Skip holes in CPU space */
		if (!CPUMASK_TSTB(boot_cpumask, cpu))
			continue;

		/*
		 * We can't use kernel_thread since we must avoid to
		 * reschedule the child.
		 */
		if (fork_by_hand() < 0)
			panic("failed fork for CPU %d", num_cpus);

		/*
		 * We remove it from the pidhash and the runqueue
		 * once we got the process:
		 */
		idle = init_task.prev_task;
		if (!idle)
			panic("No idle process for CPU %d", num_cpus);

		idle->processor = num_cpus;
		idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */

		alloc_cpupda(cpu, num_cpus);

		/* The slave enters the kernel through start_secondary(). */
		idle->thread.reg31 = (unsigned long) start_secondary;

		del_from_runqueue(idle);
		unhash_process(idle);
		init_tasks[num_cpus] = idle;

		/*
	 	 * Launch a slave into smp_bootstrap().
	 	 * It doesn't take an argument, and we
		 * set sp to the kernel stack of the newly
		 * created idle process, gp to the proc struct
		 * (so that current-> works).
	 	 */
		LAUNCH_SLAVE(cputonasid(num_cpus),cputoslice(num_cpus),
			(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
			0, (void *)((unsigned long)idle +
			KERNEL_STACK_SIZE - 32), (void *)idle);

		/*
		 * Now optimistically set the mapping arrays. We
		 * need to wait here, verify the cpu booted up, then
		 * fire up the next cpu.
		 */
		__cpu_number_map[cpu] = num_cpus;
		__cpu_logical_map[num_cpus] = cpu;
		CPUMASK_SETB(cpu_online_map, cpu);
		num_cpus++;

		/*
		 * Wait this cpu to start up and initialize its hub,
		 * and discover the io devices it will control.
		 *
		 * XXX: We really want to fire up launch all the CPUs
		 * at once.  We have to preserve the order of the
		 * devices on the bridges first though.
		 */
		while (atomic_read(&numstarted) != num_cpus);
	}

#ifdef LATER
	Wait logic goes here.
#endif
	for (cnode = 0; cnode < numnodes; cnode++) {
#if 0
		if (cnodetocpu(cnode) == -1) {
			printk("Initializing headless hub,cnode %d", cnode);
			per_hub_init(cnode);
		}
#endif
	}
#if 0
	cpu_io_setup();
	init_mfhi_war();
#endif
	smp_num_cpus = num_cpus;
}
579 
/*
 * Arch entry point for SMP boot: set up the master cpu's context and
 * idle state, then hand off to allowboot() to launch the slaves.
 */
void __init smp_boot_cpus(void)
{
	extern void allowboot(void);

	init_new_context(current, &init_mm);
	current->processor = 0;
	init_idle();
	/* smp_tune_scheduling();  XXX */
	allowboot();
}
590 
591 #else /* CONFIG_SMP */
/* Uniprocessor build: empty stub to satisfy references (see XXX). */
void __init start_secondary(void)
{
	/* XXX Why do we need this empty definition at all?  */
}
596 #endif /* CONFIG_SMP */
597 
598 
599 #define	rou_rflag	rou_flags
600 
601 void
router_recurse(klrou_t * router_a,klrou_t * router_b,int depth)602 router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
603 {
604 	klrou_t *router;
605 	lboard_t *brd;
606 	int	port;
607 
608 	if (router_a->rou_rflag == 1)
609 		return;
610 
611 	if (depth >= router_distance)
612 		return;
613 
614 	router_a->rou_rflag = 1;
615 
616 	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
617 		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
618 			continue;
619 
620 		brd = (lboard_t *)NODE_OFFSET_TO_K0(
621 			router_a->rou_port[port].port_nasid,
622 			router_a->rou_port[port].port_offset);
623 
624 		if (brd->brd_type == KLTYPE_ROUTER) {
625 			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
626 			if (router == router_b) {
627 				if (depth < router_distance)
628 					router_distance = depth;
629 			}
630 			else
631 				router_recurse(router, router_b, depth + 1);
632 		}
633 	}
634 
635 	router_a->rou_rflag = 0;
636 }
637 
/*
 * Distance between two nodes: 0 for the same node, 1 when both hang
 * off the same router, otherwise the hop count found by a bounded
 * recursive search between their first-hop routers.  Returns -1 when
 * either node's router cannot be found in klconfig.
 */
int
node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd, *dest_brd;
	int port;
	klrou_t *router, *router_a = NULL, *router_b = NULL;

	/* Figure out which routers nodes in question are connected to */
	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			/* "continue" advances brd via the while test below */
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;	/* reset for router_recurse() */

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				/* Remember which router each target node hangs off. */
				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}

		} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
	}

	if (router_a == NULL) {
		printk("node_distance: router_a NULL\n");
		return -1;
	}
	if (router_b == NULL) {
		printk("node_distance: router_b NULL\n");
		return -1;
	}

	if (nasid_a == nasid_b)
		return 0;

	if (router_a == router_b)
		return 1;

	/* Start the search with an effectively-infinite bound. */
	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return router_distance;
}
705 
706 void
init_topology_matrix(void)707 init_topology_matrix(void)
708 {
709 	nasid_t nasid, nasid2;
710 	cnodeid_t row, col;
711 
712 	for (row = 0; row < MAX_COMPACT_NODES; row++)
713 		for (col = 0; col < MAX_COMPACT_NODES; col++)
714 			node_distances[row][col] = -1;
715 
716 	for (row = 0; row < numnodes; row++) {
717 		nasid = COMPACT_TO_NASID_NODEID(row);
718 		for (col = 0; col < numnodes; col++) {
719 			nasid2 = COMPACT_TO_NASID_NODEID(col);
720 			node_distances[row][col] = node_distance(nasid, nasid2);
721 		}
722 	}
723 }
724 
725 void
dump_topology(void)726 dump_topology(void)
727 {
728 	nasid_t nasid;
729 	cnodeid_t cnode;
730 	lboard_t *brd, *dest_brd;
731 	int port;
732 	int router_num = 0;
733 	klrou_t *router;
734 	cnodeid_t row, col;
735 
736 	printk("************** Topology ********************\n");
737 
738 	printk("    ");
739 	for (col = 0; col < numnodes; col++)
740 		printk("%02d ", col);
741 	printk("\n");
742 	for (row = 0; row < numnodes; row++) {
743 		printk("%02d  ", row);
744 		for (col = 0; col < numnodes; col++)
745 			printk("%2d ", node_distances[row][col]);
746 		printk("\n");
747 	}
748 
749 	for (cnode = 0; cnode < numnodes; cnode++) {
750 		nasid = COMPACT_TO_NASID_NODEID(cnode);
751 
752 		if (nasid == -1) continue;
753 
754 		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
755 					KLTYPE_ROUTER);
756 
757 		if (!brd)
758 			continue;
759 
760 		do {
761 			if (brd->brd_flags & DUPLICATE_BOARD)
762 				continue;
763 			printk("Router %d:", router_num);
764 			router_num++;
765 
766 			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
767 
768 			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
769 				if (router->rou_port[port].port_nasid == INVALID_NASID)
770 					continue;
771 
772 				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
773 					router->rou_port[port].port_nasid,
774 					router->rou_port[port].port_offset);
775 
776 				if (dest_brd->brd_type == KLTYPE_IP27)
777 					printk(" %d", dest_brd->brd_nasid);
778 				if (dest_brd->brd_type == KLTYPE_ROUTER)
779 					printk(" r");
780 			}
781 			printk("\n");
782 
783 		} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
784 	}
785 }
786 
#if 0
/* Disabled debug dumper: prints the whole klconfig tree and router
 * port maps for every node, then the topology matrix. */
#define		brd_widgetnum	brd_slot
#define NODE_OFFSET_TO_KLINFO(n,off)    ((klinfo_t*) TO_NODE_CAC(n,off))
void
dump_klcfg(void)
{
	cnodeid_t       cnode;
	int i;
	nasid_t         nasid;
	lboard_t        *lbptr;
	gda_t           *gdap;

	gdap = (gda_t *)GDA_ADDR(get_nasid());
	if (gdap->g_magic != GDA_MAGIC) {
		printk("dumpklcfg_cmd: Invalid GDA MAGIC\n");
		return;
	}

	/* One section per node: every board and its components. */
	for (cnode = 0; cnode < MAX_COMPACT_NODES; cnode ++) {
		nasid = gdap->g_nasidtable[cnode];

		if (nasid == INVALID_NASID)
			continue;

		printk("\nDumpping klconfig Nasid %d:\n", nasid);

		lbptr = KL_CONFIG_INFO(nasid);

		while (lbptr) {
			printk("    %s, Nasid %d, Module %d, widget 0x%x, partition %d, NIC 0x%x lboard 0x%lx",
				"board name here", /* BOARD_NAME(lbptr->brd_type), */
				lbptr->brd_nasid, lbptr->brd_module,
				lbptr->brd_widgetnum,
				lbptr->brd_partition,
				(lbptr->brd_nic), lbptr);
			if (lbptr->brd_flags & DUPLICATE_BOARD)
				printk(" -D");
			printk("\n");
			for (i = 0; i < lbptr->brd_numcompts; i++) {
				klinfo_t *kli;
				kli = NODE_OFFSET_TO_KLINFO(NASID_GET(lbptr), lbptr->brd_compts[i]);
				printk("        type %2d, flags 0x%04x, diagval %3d, physid %4d, virtid %2d: %s\n",
					kli->struct_type,
					kli->flags,
					kli->diagval,
					kli->physid,
					kli->virtid,
					"comp. name here");
					/* COMPONENT_NAME(kli->struct_type)); */
			}
			lbptr = KLCF_NEXT(lbptr);
		}
	}
	printk("\n");

	/* Useful to print router maps also */

	for (cnode = 0; cnode < MAX_COMPACT_NODES; cnode ++) {
		klrou_t *kr;
		int i;

        	nasid = gdap->g_nasidtable[cnode];
        	if (nasid == INVALID_NASID)
            		continue;
        	lbptr = KL_CONFIG_INFO(nasid);

        	while (lbptr) {

			lbptr = find_lboard_class(lbptr, KLCLASS_ROUTER);
			if(!lbptr)
				break;
			if (!KL_CONFIG_DUPLICATE_BOARD(lbptr)) {
				printk("%llx -> \n", lbptr->brd_nic);
				kr = (klrou_t *)find_first_component(lbptr,
					KLSTRUCT_ROU);
				for (i = 1; i <= MAX_ROUTER_PORTS; i++) {
					printk("[%d, %llx]; ",
						kr->rou_port[i].port_nasid,
						kr->rou_port[i].port_offset);
				}
				printk("\n");
			}
			lbptr = KLCF_NEXT(lbptr);
        	}
        	printk("\n");
    	}

	dump_topology();
}
#endif
877 
878