// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPU management operations for OF-based platforms
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 * Copyright 2006 Sony Corp.
 * (C) Copyright 2007 TOSHIBA CORPORATION
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>

#include "spufs/spufs.h"
#include "interrupt.h"

struct device_node *spu_devnode(struct spu *spu)
{
	return spu->devnode;
}
EXPORT_SYMBOL_GPL(spu_devnode);

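/*
 * Look up the SPE's unit number, checking the properties used by
 * successive device tree layouts: "physical-id" (new), "unit-id"
 * (celleb), then "reg" (legacy).
 */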
static u64 __init find_spu_unit_number(struct device_node *spe)
{
	const unsigned int *prop;
	int proplen;

	/* new device trees should provide the physical-id attribute */
	prop = of_get_property(spe, "physical-id", &proplen);
	if (prop && proplen == 4)
		return (u64)*prop;

	/* celleb device tree provides the unit-id */
	prop = of_get_property(spe, "unit-id", &proplen);
	if (prop && proplen == 4)
		return (u64)*prop;

	/* legacy device trees provide the id in the reg attribute */
	prop = of_get_property(spe, "reg", &proplen);
	if (prop && proplen == 4)
		return (u64)*prop;

	return 0;
}

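/*
 * Tear down the mappings created by spu_map_device() or
 * spu_map_device_old(); priv1 is only mapped when we are not running
 * under a hypervisor (no FW_FEATURE_LPAR).
 */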
static void spu_unmap(struct spu *spu)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		iounmap(spu->priv1);
	iounmap(spu->priv2);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

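/*
 * Legacy interrupt mapping: build the IIC interrupt source number from
 * the "isrc" property and the BE's "node-id", then create mappings for
 * all three interrupt classes by hand.
 */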
static int __init spu_map_interrupts_old(struct spu *spu,
	struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;
	int nid;

	/* Get the interrupt source unit from the device-tree */
	tmp = of_get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	tmp = of_get_property(np->parent->parent, "node-id", NULL);
	if (!tmp) {
		printk(KERN_WARNING "%s: can't find node-id\n", __func__);
		nid = spu->node;
	} else {
		nid = tmp[0];
	}

	/* Add the node number */
	isrc |= nid << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	if (!spu->irqs[2])
		return -EINVAL;

	return 0;
}

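/*
 * Legacy trees describe each area as a packed (address, length) pair
 * in a property named after the area, e.g. "local-store" or "problem";
 * map it with ioremap().
 */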
static void __iomem * __init spu_map_prop_old(struct spu *spu,
					      struct device_node *n,
					      const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;
	int proplen;

	prop = of_get_property(n, name, &proplen);
	if (prop == NULL || proplen != sizeof(struct address_prop))
		return NULL;

	return ioremap(prop->address, prop->len);
}

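/* Map all SPE areas from a legacy (pre-"reg"/"ranges") device tree. */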
static int __init spu_map_device_old(struct spu *spu)
{
	struct device_node *node = spu->devnode;
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = of_get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = of_get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		spu_map_prop_old(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = of_get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = spu_map_prop_old(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv2 = spu_map_prop_old(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		spu->priv1 = spu_map_prop_old(spu, node, "priv1");
		if (!spu->priv1)
			goto out_unmap;
	}

	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

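/*
 * Modern device trees describe the three interrupt classes in the
 * standard "interrupts" property, so the generic OF code can map them.
 */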
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	int i;

	for (i = 0; i < 3; i++) {
		spu->irqs[i] = irq_of_parse_and_map(np, i);
		if (!spu->irqs[i])
			goto err;
	}
	return 0;

err:
	pr_debug("failed to map irq %d for spu %s\n", i, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i])
			irq_dispose_mapping(spu->irqs[i]);
	}
	return -EINVAL;
}

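/*
 * Map resource @nr of the SPE node at @virt, optionally returning the
 * physical address in @phys.
 */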
static int __init spu_map_resource(struct spu *spu, int nr,
				   void __iomem **virt, unsigned long *phys)
{
	struct device_node *np = spu->devnode;
	struct resource resource = { };
	unsigned long len;
	int ret;

	ret = of_address_to_resource(np, nr, &resource);
	if (ret)
		return ret;
	if (phys)
		*phys = resource.start;
	len = resource_size(&resource);
	*virt = ioremap(resource.start, len);
	if (!*virt)
		return -EINVAL;
	return 0;
}

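/*
 * Map all SPE areas from a modern device tree; the node's resources
 * are, in order: local store, problem state, priv2 and (when running
 * without a hypervisor) priv1.
 */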
static int __init spu_map_device(struct spu *spu)
{
	struct device_node *np = spu->devnode;
	int ret = -ENODEV;

	spu->name = of_get_property(np, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(spu, 0, (void __iomem **)&spu->local_store,
			       &spu->local_store_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 0\n", np);
		goto out;
	}
	ret = spu_map_resource(spu, 1, (void __iomem **)&spu->problem,
			       &spu->problem_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 1\n", np);
		goto out_unmap;
	}
	ret = spu_map_resource(spu, 2, (void __iomem **)&spu->priv2, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 2\n", np);
		goto out_unmap;
	}
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(spu, 3,
				       (void __iomem **)&spu->priv1, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %pOF resource 3\n", np);
		goto out_unmap;
	}
	pr_debug("spu_new: %pOF maps:\n", np);
	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
		 spu->local_store_phys, spu->local_store);
	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
		 spu->problem_phys, spu->problem);
	pr_debug("  priv2         :                       0x%p\n", spu->priv2);
	pr_debug("  priv1         :                       0x%p\n", spu->priv1);

	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

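/*
 * Call @fn for each "spe" node in the device tree; returns the number
 * of SPEs found, or the first error.
 */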
static int __init of_enumerate_spus(int (*fn)(void *data))
{
	int ret;
	struct device_node *node;
	unsigned int n = 0;

	ret = -ENODEV;
	for_each_node_by_type(node, "spe") {
		ret = fn(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %pOFn\n",
				__func__, node);
			of_node_put(node);
			break;
		}
		n++;
	}
	return ret ? ret : n;
}

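/*
 * Set up a single SPU from its device tree node, falling back to the
 * legacy map and irq code if the new style probing fails.
 */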
static int __init of_create_spu(struct spu *spu, void *data)
{
	int ret;
	struct device_node *spe = (struct device_node *)data;
	static int legacy_map = 0, legacy_irq = 0;

	spu->devnode = of_node_get(spe);
	spu->spe_id = find_spu_unit_number(spe);

	spu->node = of_node_to_nid(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING
		       "SPE %pOF on node %d ignored, node number too big\n",
		       spe, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		ret = -ENODEV;
		goto out;
	}

	ret = spu_map_device(spu);
	if (ret) {
		if (!legacy_map) {
			legacy_map = 1;
			printk(KERN_WARNING
			       "%s: Legacy device tree found, trying to map old style\n",
			       __func__);
		}
		ret = spu_map_device_old(spu);
		if (ret) {
			printk(KERN_ERR "Unable to map %s\n",
				spu->name);
			goto out;
		}
	}

	ret = spu_map_interrupts(spu, spe);
	if (ret) {
		if (!legacy_irq) {
			legacy_irq = 1;
			printk(KERN_WARNING
			       "%s: Legacy device tree found, trying old style irq\n",
			       __func__);
		}
		ret = spu_map_interrupts_old(spu, spe);
		if (ret) {
			printk(KERN_ERR "%s: could not map interrupts\n",
				spu->name);
			goto out_unmap;
		}
	}

	pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
		spu->local_store, spu->problem, spu->priv1,
		spu->priv2, spu->number);
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int of_destroy_spu(struct spu *spu)
{
	spu_unmap(spu);
	of_node_put(spu->devnode);
	return 0;
}

static void enable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_start(ctx);
}

static void disable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_stop(ctx);
}

/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };

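/* Find the SPU on @node whose "reg" property matches @reg. */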
static struct spu *__init spu_lookup_reg(int node, u32 reg)
{
	struct spu *spu;
	const u32 *spu_reg;

	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
		spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
		if (spu_reg && *spu_reg == reg)
			return spu;
	}
	return NULL;
}

static void __init init_affinity_qs20_hardcoded(void)
{
	int node, i;
	struct spu *last_spu, *spu;
	u32 reg;

	for (node = 0; node < MAX_NUMNODES; node++) {
		last_spu = NULL;
		for (i = 0; i < QS20_SPES_PER_BE; i++) {
			reg = qs20_reg_idxs[i];
			spu = spu_lookup_reg(node, reg);
			if (!spu)
				continue;
			spu->has_mem_affinity = qs20_reg_memory[reg];
			if (last_spu)
				list_add_tail(&spu->aff_list,
						&last_spu->aff_list);
			last_spu = spu;
		}
	}
}

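/* Does any SPE node carry a "vicinity" property? */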
static int __init of_has_vicinity(void)
{
	struct device_node *dn;

	for_each_node_by_type(dn, "spe") {
		if (of_find_property(dn, "vicinity", NULL)) {
			of_node_put(dn);
			return 1;
		}
	}
	return 0;
}

static struct spu *__init devnode_spu(int cbe, struct device_node *dn)
{
	struct spu *spu;

	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
		if (spu_devnode(spu) == dn)
			return spu;
	return NULL;
}

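/*
 * Find the SPU (other than @avoid) that lists @target in its
 * "vicinity" property.
 */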
static struct spu * __init
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
	struct spu *spu;
	struct device_node *spu_dn;
	const phandle *vic_handles;
	int lenp, i;

	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
		spu_dn = spu_devnode(spu);
		if (spu_dn == avoid)
			continue;
		vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
		if (!vic_handles)
			continue;
		for (i = 0; i < (lenp / sizeof(phandle)); i++) {
			if (vic_handles[i] == target->phandle)
				return spu;
		}
	}
	return NULL;
}

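/*
 * Build the affinity list for one BE by following the "vicinity"
 * phandles from SPE to SPE around the ring.
 */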
static void __init init_affinity_node(int cbe)
{
	struct spu *spu, *last_spu;
	struct device_node *vic_dn, *last_spu_dn;
	phandle avoid_ph;
	const phandle *vic_handles;
	int lenp, i, added;

	last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
								cbe_list);
	avoid_ph = 0;
	for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
		last_spu_dn = spu_devnode(last_spu);
		vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);
		/* without vicinity information we cannot follow the chain */
		if (!vic_handles)
			break;

		/*
		 * Walk through each phandle in the vicinity property of the
		 * spu (typically two vicinity phandles per spe node).
		 */
		for (i = 0; i < (lenp / sizeof(phandle)); i++) {
			if (vic_handles[i] == avoid_ph)
				continue;

			vic_dn = of_find_node_by_phandle(vic_handles[i]);
			if (!vic_dn)
				continue;

			if (of_node_name_eq(vic_dn, "spe")) {
				spu = devnode_spu(cbe, vic_dn);
				avoid_ph = last_spu_dn->phandle;
			} else {
				/*
				 * "mic-tm" and "bif0" nodes do not have a
				 * vicinity property, so we need to find the
				 * spe which has vic_dn as a neighbour, but
				 * skip the one we came from (last_spu_dn).
				 */
				spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
				if (!spu) {
					of_node_put(vic_dn);
					continue;
				}
				if (of_node_name_eq(vic_dn, "mic-tm")) {
					last_spu->has_mem_affinity = 1;
					spu->has_mem_affinity = 1;
				}
				avoid_ph = vic_dn->phandle;
			}

			of_node_put(vic_dn);

			/* devnode_spu() may not have found a match */
			if (!spu)
				continue;

			list_add_tail(&spu->aff_list, &last_spu->aff_list);
			last_spu = spu;
			break;
		}
	}
}

static void __init init_affinity_fw(void)
{
	int cbe;

	for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
		init_affinity_node(cbe);
}

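/*
 * Use the firmware-provided vicinity information when present,
 * otherwise fall back to the hardcoded QS20 layout.
 */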
static int __init init_affinity(void)
{
	if (of_has_vicinity()) {
		init_affinity_fw();
	} else {
		if (of_machine_is_compatible("IBM,CPBW-1.0"))
			init_affinity_qs20_hardcoded();
		else
			printk(KERN_INFO "No affinity configuration found\n");
	}

	return 0;
}

const struct spu_management_ops spu_management_of_ops = {
	.enumerate_spus = of_enumerate_spus,
	.create_spu = of_create_spu,
	.destroy_spu = of_destroy_spu,
	.enable_spu = enable_spu_by_master_run,
	.disable_spu = disable_spu_by_master_run,
	.init_affinity = init_affinity,
};