// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */

#define pr_fmt(fmt) "mobility: " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <asm/machdep.h>
#include <asm/nmi.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "vas.h"	/* vas_migration_handler() */
#include "../../kernel/cacheinfo.h"

static struct kobject *mobility_kobj;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
#define PRRN_SCOPE -2

#ifdef CONFIG_PPC_WATCHDOG
static unsigned int nmi_wd_lpm_factor = 200;

#ifdef CONFIG_SYSCTL
static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
	{
		.procname	= "nmi_wd_lpm_factor",
		.data		= &nmi_wd_lpm_factor,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
	},
	{}
};

static int __init register_nmi_wd_lpm_factor_sysctl(void)
{
	register_sysctl("kernel", nmi_wd_lpm_factor_ctl_table);

	return 0;
}
device_initcall(register_nmi_wd_lpm_factor_sysctl);
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_PPC_WATCHDOG */

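/*
 * All the RTAS calls used here take a work area, and all of them go
 * through the single global rtas_data_buf. Copy the caller's buffer
 * in and out under rtas_data_buf_lock to serialize against other
 * users of that buffer.
 */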
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}

static int delete_dt_node(struct device_node *dn)
{
	struct device_node *pdn;
	bool is_platfac;

	pdn = of_get_parent(dn);
	is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
		     of_node_is_type(pdn, "ibm,platform-facilities");
	of_node_put(pdn);

	/*
	 * The drivers that bind to nodes in the platform-facilities
	 * hierarchy don't support node removal, and the removal directive
	 * from firmware is always followed by an add of an equivalent
	 * node. The capability (e.g. RNG, encryption, compression)
	 * represented by the node is never interrupted by the migration.
	 * So ignore changes to this part of the tree.
	 */
	if (is_platfac) {
		pr_notice("ignoring remove operation for %pOFfp\n", dn);
		return 0;
	}

	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}

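/*
 * Apply one property value descriptor from an ibm,update-properties
 * response to @dn. When the value spans multiple RTAS calls (vd has
 * its high bit set), the chunks are accumulated in *@prop until the
 * final one arrives and the assembled property is committed with
 * of_update_property().
 */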
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/* A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);

		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}

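/*
 * Fetch all property updates for @dn by calling ibm,update-properties
 * until it stops returning 1 ("more data available"). The work area
 * is seeded with the node's phandle; firmware fills it with property
 * value descriptors, which are applied one at a time.
 */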
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_function_token(RTAS_FN_IBM_UPDATE_PROPERTIES);
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					     scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* On the first call to ibm,update-properties for a node the
		 * first property value descriptor contains an empty
		 * property name, the property value length encoded as u32,
		 * and the property value is the node path being updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
				break;
			}

			cond_resched();
		}

		cond_resched();
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}

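/*
 * Pull the device tree node identified by @drc_index from firmware
 * via ibm,configure-connector and attach it beneath @parent_dn.
 */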
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	/*
	 * Since delete_dt_node() ignores this node type, this is the
	 * necessary counterpart. We also know that a platform-facilities
	 * node returned from dlpar_configure_connector() has children
	 * attached, and dlpar_attach_node() only adds the parent, leaking
	 * the children. So ignore these on the add side for now.
	 */
	if (of_node_is_type(dn, "ibm,platform-facilities")) {
		pr_notice("ignoring add operation for %pOF\n", dn);
		dlpar_free_cc_nodes(dn);
		return 0;
	}

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}

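/*
 * Call ibm,update-nodes repeatedly and apply each delete/update/add
 * directive it returns. The directives start after the first four
 * 32-bit words of the work area (hence the "+ 4" below); each
 * directive word encodes the action in its top byte and a node count
 * in the low three bytes.
 */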
static int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_function_token(RTAS_FN_IBM_UPDATE_NODES);
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
				cond_resched();
			}
		}

		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}

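/*
 * Bring kernel state back in line with the destination platform after
 * a successful suspend/resume: activate firmware and fold the
 * destination's device tree changes into the live tree, then refresh
 * state that depends on it (L1 flush type, hv-24x7 system info).
 */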
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes. Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();
}

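/*
 * Query the state of the VASI stream identified by @handle via
 * H_VASI_STATE, translating hypervisor return codes into errnos.
 * On success the raw state value is stored in *@res.
 */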
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}

static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}

static void wait_for_vasi_session_completed(u64 handle)
{
	unsigned long state = 0;
	int ret;

	pr_info("waiting for memory transfer to complete...\n");

	/*
	 * Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		/*
		 * If the memory transfer is already complete and the
		 * migration has been cleaned up by the hypervisor,
		 * H_PARAMETER is returned, which poll_vasi_state()
		 * translates into -EINVAL.
		 */
		if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) {
			pr_info("memory transfer completed.\n");
			break;
		}

		if (ret) {
			pr_err("H_VASI_STATE error: %d\n", ret);
			break;
		}

		if (state != H_VASI_RESUMED) {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			break;
		}

		msleep(500);
	}
}

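/*
 * H_PROD wakes a CPU that is sleeping in H_JOIN. Used after the
 * suspend completes to release the other threads from the hypervisor.
 */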
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}

static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}

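/*
 * Shrink the SLB to the architectural minimum before suspending and
 * return the previous size so it can be restored on failure. See the
 * comment in do_suspend() for the rationale.
 */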
static u16 clamp_slb_size(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
#else
	return 0;
#endif
}

static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}

/**
 * struct pseries_suspend_info - State shared between CPUs for join/suspend.
 * @counter: Threads are to increment this upon resuming from suspend
 *           or if an error is received from H_JOIN. The thread which performs
 *           the first increment (i.e. sets it to 1) is responsible for
 *           waking the other threads.
 * @done: False if join/suspend is in progress. True if the operation is
 *        complete (successful or not).
 */
struct pseries_suspend_info {
	atomic_t counter;
	bool done;
};

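/*
 * Runs on every online CPU via stop_machine(). Each CPU calls H_JOIN;
 * the hypervisor returns H_CONTINUE to the last caller, which then
 * performs the suspend. The remaining CPUs sleep in H_JOIN until
 * prodded after resume (or after a failure).
 */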
static int do_join(void *arg)
{
	struct pseries_suspend_info *info = arg;
	atomic_t *counter = &info->counter;
	long hvrc;
	int ret;

retry:
	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this cpu has received a
		 * prod, or we've received a stray prod from unrelated
		 * code (e.g. paravirt spinlocks) and we need to join
		 * again.
		 *
		 * This barrier orders the return from H_JOIN above vs
		 * the load of info->done. It pairs with the barrier
		 * in the wakeup/prod path below.
		 */
		smp_mb();
		if (READ_ONCE(info->done) == false) {
			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying\n",
					    smp_processor_id());
			goto retry;
		}
		ret = 0;
		break;
	case H_BAD_MODE:
	case H_HARDWARE:
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		WRITE_ONCE(info->done, true);
		/*
		 * This barrier orders the store to info->done vs subsequent
		 * H_PRODs to wake the other CPUs. It pairs with the barrier
		 * in the H_SUCCESS case above.
		 */
		smp_mb();
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so reset
	 * the watchdogs. touch_nmi_watchdog() also touches the soft lockup
	 * watchdog.
	 */
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();

	return ret;
}

/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR        = 1,
	VSP_SOURCE          = 2,
	PARTITION_FIRMWARE  = 3,
	PLATFORM_FIRMWARE   = 4,
	VSP_TARGET          = 5,
	MIGRATING_PARTITION = 6,
};

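/*
 * Tell the platform to abandon the migration: signal a cancel on the
 * VASI stream with a reason code of (aborting entity << 24) | detail,
 * where the detail is derived from the errno of the failed suspend.
 */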
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}

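/*
 * Perform the join/suspend under stop_machine(), retrying with
 * exponential backoff (1ms, 10ms, ... for up to five attempts) as
 * long as the VASI stream remains in the Suspending state.
 */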
static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		struct pseries_suspend_info info;
		unsigned long vasi_state;
		int vasi_err;

		info = (struct pseries_suspend_info) {
			.counter = ATOMIC_INIT(0),
			.done = false,
		};

		ret = stop_machine(do_join, &info, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * mitigation.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d\n", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}

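/*
 * Top-level flow of a migration: close VAS windows, wait for the VASI
 * stream to reach Suspending, optionally stretch the hardlockup
 * watchdog timeout, suspend, then either run post-resume fixups and
 * wait for the memory transfer to finish or cancel the stream, and
 * finally reopen VAS windows.
 */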
static int pseries_migrate_partition(u64 handle)
{
	int ret;
	unsigned int factor = 0;

#ifdef CONFIG_PPC_WATCHDOG
	factor = nmi_wd_lpm_factor;
#endif
	/*
	 * When the migration is initiated, the hypervisor changes VAS
	 * mappings to prepare before the OS gets the notification and
	 * closes all VAS windows. NX generates continuous faults during
	 * this time and user space cannot differentiate these faults
	 * from the migration event. So reduce this time window by
	 * closing VAS windows at the beginning of this function.
	 */
	vas_migration_handler(VAS_SUSPEND);

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		goto out;

	if (factor)
		watchdog_hardlockup_set_timeout_pct(factor);

	ret = pseries_suspend(handle);
	if (ret == 0) {
		post_mobility_fixup();
		/*
		 * Wait until the memory transfer is complete, so that the
		 * user space process returns from the syscall after the
		 * transfer is complete. This allows the user hooks to be
		 * executed at the right time.
		 */
		wait_for_vasi_session_completed(handle);
	} else {
		pseries_cancel_migration(handle, ret);
	}

	if (factor)
		watchdog_hardlockup_set_timeout_pct(0);

out:
	vas_migration_handler(VAS_RESUME);

	return ret;
}

int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}

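/*
 * Sysfs write handler for the migration attribute. User space (drmgr)
 * writes the 64-bit VASI stream id here to start a migration; the
 * write returns only once the migration has completed or failed.
 */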
static ssize_t migration_store(const struct class *class,
			       const struct class_attribute *attr,
			       const char *buf, size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}

/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 * firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1

static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));

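/*
 * Expose the migration and api_version attributes under
 * /sys/kernel/mobility. Failure to create either file is reported
 * but is not fatal.
 */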
static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);