// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

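/*
 * Header layout of the work area passed to the ibm,configure-connector
 * RTAS call.  The name and property offsets returned by RTAS are relative
 * to the start of the work area.
 */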
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

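/*
 * Free a property built by dlpar_parse_cc_property(), including its
 * name and value buffers.
 */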
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

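/*
 * Build a struct property from the name and value data that RTAS
 * returned in the configure-connector work area.
 */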
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

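/*
 * Allocate a bare device node named from the configure-connector work
 * area.  Properties and parent/child/sibling links are filled in by the
 * caller.
 */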
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

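/*
 * Free a single node created by dlpar_parse_cc_node() along with all of
 * its properties.
 */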
static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

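/* Recursively free a subtree of nodes built by dlpar_configure_connector(). */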
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

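/* Return status values from the ibm,configure-connector RTAS call */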
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define ERR_CFG_USE     -9003

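/*
 * Repeatedly call ibm,configure-connector for @drc_index and translate the
 * returned work area into a subtree of device nodes rooted under @parent.
 * Returns the first new node, or NULL on failure.
 */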
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls, we want to re-populate the rtas_data_buf
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		if (rtas_busy_delay(rc))
			continue;

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

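/* Graft @dn (typically built by dlpar_configure_connector()) under @parent. */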
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

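/*
 * Recursively detach @dn's children, then detach @dn itself from the
 * device tree and drop a reference to it.
 */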
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

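/*
 * Sensor and indicator tokens/values used with rtas_get_sensor() and
 * rtas_set_indicator() below to manage DRC allocation and isolation state.
 */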
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

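/*
 * Claim an unused DRC: check that it is currently unusable, mark it
 * usable, then unisolate it.  The allocation state is rolled back if
 * unisolation fails.
 */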
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

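/*
 * Return a DRC to firmware: check that it is present, isolate it, then
 * mark it unusable.  Isolation is rolled back if the allocation change
 * fails.
 */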
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

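/* Clear the isolation state of a DRC that firmware reports as present. */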
int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

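/*
 * Convert the DRC fields of @hp_elog from big-endian and dispatch the
 * request to the memory, CPU, or persistent-memory handler.
 */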
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

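/* Workqueue handler: process one queued hotplug error log, then free it. */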
static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree(work);
}

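/*
 * Duplicate @hp_errlog and queue it for processing on the DLPAR
 * workqueue.  GFP_ATOMIC is used so this is safe from atomic context.
 */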
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

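/*
 * Parsers for the space-separated request written to the dlpar sysfs
 * attribute: <resource> <action> <id_type> <id>
 */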
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

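/*
 * sysfs store handler for the dlpar attribute: parse the request into a
 * pseries_hp_errorlog and process it synchronously.
 */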
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user; it will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

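/*
 * Create the unbound workqueue (at most one active work item) used for
 * queued hotplug events.  Safe to call more than once.
 */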
int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
			WQ_UNBOUND, 1);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

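/* Create the dlpar sysfs attribute under /sys/kernel at boot. */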
static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);
