/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/bootmem.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
	u32	version; /* Transport version */
	u32	node_sz; /* node block size */
	u32	name_sz; /* name block size */
	u32	data_sz; /* data block size */
} __attribute__((aligned(16)));

struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};

struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};

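/* A mdesc_handle wraps a copy of the raw MD blob together with a
 * reference count and the mdesc_mem_ops used to allocate (and later
 * free) it.  The MD itself starts at 'mdesc' and must stay 16-byte
 * aligned, which is why mdesc_hdr above is declared aligned(16).
 */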
struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	atomic_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};

static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	atomic_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}

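/* Early boot allocator: the first machine description is fetched
 * before the slab allocator is available, so its handle is carved out
 * of memblock and released to the page allocator if it is ever freed.
 */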
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}

static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size;
	unsigned long start;

	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(hp->handle_size);
	start = __pa(hp);
	free_bootmem_late(start, alloc_size);
}

static struct mdesc_mem_ops memblock_mdesc_ops = {
	.alloc = mdesc_memblock_alloc,
	.free  = mdesc_memblock_free,
};

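/* Runtime allocator: later MD updates are fetched into kmalloc()'d
 * memory.  The buffer is over-allocated by 15 bytes so that the handle
 * (and therefore the embedded MD) can be aligned up to a 16-byte
 * boundary; self_base remembers the original pointer for kfree().
 */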
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);

	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
	if (base) {
		struct mdesc_handle *hp;
		unsigned long addr;

		addr = (unsigned long)base;
		addr = (addr + 15UL) & ~15UL;
		hp = (struct mdesc_handle *) addr;

		mdesc_handle_init(hp, handle_size, base);
		return hp;
	}

	return NULL;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

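/* mdesc_grab() returns the current machine description with its
 * reference count bumped; mdesc_release() drops the reference and
 * frees the handle once the last user is gone.
 *
 * Typical usage by a consumer (a sketch, using names defined here and
 * in asm/mdesc.h):
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *	u64 node;
 *
 *	mdesc_for_each_node_by_name(hp, node, "cpu") {
 *		const u64 *id = mdesc_get_property(hp, node, "id", NULL);
 *		...
 *	}
 *	mdesc_release(hp);
 */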
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		atomic_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

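/* Notifier clients are told when nodes of interest (client->node_name)
 * appear or disappear across machine description updates.  At
 * registration time the client's add() callback is invoked for every
 * matching node in the current MD.
 */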
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	u64 node;

	mutex_lock(&mdesc_mutex);
	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node);

	mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id;
	u64 a;

	id = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64))
{
	u64 node;

	mdesc_for_each_node_by_name(a, node, name) {
		int found = 0, is_vdc_port = 0;
		const char *name_prop;
		const u64 *id;
		u64 fnode;

		name_prop = mdesc_get_property(a, node, "name", NULL);
		if (name_prop && !strcmp(name_prop, "vdc-port")) {
			is_vdc_port = 1;
			id = parent_cfg_handle(a, node);
		} else
			id = mdesc_get_property(a, node, "id", NULL);

		if (!id) {
			printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
			       (name_prop ? name_prop : name));
			continue;
		}

		mdesc_for_each_node_by_name(b, fnode, name) {
			const u64 *fid;

			if (is_vdc_port) {
				name_prop = mdesc_get_property(b, fnode,
							       "name", NULL);
				if (!name_prop ||
				    strcmp(name_prop, "vdc-port"))
					continue;
				fid = parent_cfg_handle(b, fnode);
				if (!fid) {
					printk(KERN_ERR "MD: Cannot find ID "
					       "for vdc-port node.\n");
					continue;
				}
			} else
				fid = mdesc_get_property(b, fnode,
							 "id", NULL);

			if (*id == *fid) {
				found = 1;
				break;
			}
		}
		if (!found)
			func(a, node);
	}
}

static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}

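/* Re-fetch the machine description from the hypervisor (typically in
 * response to an MD update event), install it as cur_mdesc, and notify
 * clients of node additions and removals.  The old handle is freed
 * immediately if nobody still holds a reference, otherwise it is
 * parked on mdesc_zombie_list until the last mdesc_release().
 */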
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		atomic_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}

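/* The MD blob is laid out as the mdesc_hdr followed by three blocks:
 * the node block (an array of 16-byte mdesc_elem entries), the name
 * block (strings referenced via name_offset), and the data block
 * (referenced via data_offset for STR/DATA properties).
 */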
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

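/* Find the next node named 'name' after 'from_node', or the first such
 * node when from_node is MDESC_NODE_NULL.  Node indices are in units
 * of 16-byte elements; for an MD_NODE element, d.val holds the index
 * of the next node, which lets us hop from node to node without
 * walking the property elements in between.
 */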
u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;
	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);

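/* Look up a property of 'node' by name.  Properties are the elements
 * between the node's MD_NODE entry and its MD_NODE_END terminator;
 * VAL properties return a pointer to the 8-byte value, while STR and
 * DATA properties return a pointer into the data block.
 */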
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;

		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;
			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);

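/* Return the index of the next MD_PROP_ARC element of type 'arc_type'
 * (MDESC_ARC_TYPE_FWD or MDESC_ARC_TYPE_BACK) after element 'from',
 * which may be a node or a previously returned arc.  The arc's target
 * node is obtained with mdesc_arc_target().
 */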
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;

	ep = base + from;

	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);

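/* Log the interesting properties of the "platform" node at boot and,
 * on SMP, use its "max-cpus" property (capped at NR_CPUS) to size the
 * possible CPU map.
 */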
static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v)
		printk("PLATFORM: max-cpus [%llu]\n", *v);

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}

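/* Record the cache geometry described by one "cache" node into the
 * cpuinfo.  Level 1 nodes fill in the i-cache or d-cache fields
 * depending on the "type" property, level 2 fills in the external
 * cache fields, and level 1 nodes are also followed forward to pick
 * up the L2 cache behind them.
 */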
static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
					struct mdesc_handle *hp,
					u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}

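/* Core IDs are derived from cache sharing: every cpu that can reach a
 * given level 1 instruction cache via back arcs, either directly or
 * through one intermediate node, is marked with the same core_id.
 */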
static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (!strcmp(name, "cpu")) {
			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < NR_CPUS)
				cpu_data(*id).core_id = core_id;
		} else {
			u64 j;

			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
				u64 n = mdesc_arc_target(hp, j);
				const char *n_name;

				n_name = mdesc_node_name(hp, n);
				if (strcmp(n_name, "cpu"))
					continue;

				id = mdesc_get_property(hp, n, "id", NULL);
				if (*id < NR_CPUS)
					cpu_data(*id).core_id = core_id;
			}
		}
	}
}

static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);

		idx++;
	}
}

static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}

static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);

		idx++;
	}
}

static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}

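/* Convert a "q-*-#bits" property into a byte-size mask for the
 * corresponding mondo/error queue.  The property gives log2 of the
 * number of queue entries and each entry is 64 bytes, so the mask is
 * (2^val * 64) - 1.  Fall back to 'def' bits when the property is
 * missing or out of range.
 */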
static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
					 unsigned char def)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}

static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
				     struct trap_per_cpu *tb)
{
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}

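/* Walk all "cpu" nodes in the current MD and invoke 'func' on each one
 * whose id is usable (below NR_CPUS and present in 'mask' on SMP).
 * Iteration stops early if 'func' returns a non-NULL value, which is
 * then passed back to the caller.
 */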
static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpu_isset(cpuid, *mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}

static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	ncpus_probed++;
#ifdef CONFIG_SMP
	set_cpu_present(cpuid, true);
#endif
	return NULL;
}

void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;

	ncpus_probed = 0;
	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}

static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;

#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;

			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	c->core_id = 0;
	c->proc_id = -1;

	return NULL;
}

void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

#ifdef CONFIG_SMP
	sparc64_multi_core = 1;
#endif

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);

	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}

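/* /dev/mdesc: expose the raw machine description to userspace as a
 * single read.  Buffers smaller than the handle size are refused with
 * -EMSGSIZE rather than returning a truncated blob.
 */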
static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = mdesc_grab();
	int err;

	if (!hp)
		return -ENODEV;

	err = hp->handle_size;
	if (len < hp->handle_size)
		err = -EMSGSIZE;
	else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
		err = -EFAULT;
	mdesc_release(hp);

	return err;
}

static const struct file_operations mdesc_fops = {
	.read	= mdesc_read,
	.owner	= THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);

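/* Fetch the initial machine description at boot.  The first hypervisor
 * call with a zero buffer just reports the required length; the second
 * copies the MD into a memblock-backed handle, which then becomes
 * cur_mdesc.
 */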
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &memblock_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();
}