// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004 IBM Corp.
 * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
 * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020 IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>

struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct crash_mem_range *ranges;
};

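/* ELF64 kernel images are the only format handled by kexec_file_load on ppc64 */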
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

/**
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab which should be avoided while
 *                             setting up kexec load segments.
 * @mem_ranges:                Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_initrd_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_htab_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_kernel_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	/* exclude memory ranges should be sorted for easy lookup */
	sort_memory_ranges(*mem_ranges, true);
out:
	if (ret)
		pr_err("Failed to setup exclude memory ranges\n");
	return ret;
}

/**
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table
 *                            that the kdump kernel could use.
 * @mem_ranges:               Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	/*
	 * Early boot failure observed on guests when low memory (first memory
	 * block?) is not added to usable memory. So, add [0, crashk_res.end]
	 * instead of [crashk_res.start, crashk_res.end] to work around it.
	 * Also, crashed kernel's memory must be added to the reserve map so
	 * that the kdump kernel does not use it.
	 */
	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup usable memory ranges\n");
	return ret;
}

/**
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
	phys_addr_t base, end;
	struct crash_mem *tmem;
	u64 i;
	int ret;

	for_each_mem_range(i, &base, &end) {
		u64 size = end - base;

		/* Skip backup memory region, which needs a separate entry */
		if (base == BACKUP_SRC_START) {
			if (size > BACKUP_SRC_SIZE) {
				base = BACKUP_SRC_END + 1;
				size -= BACKUP_SRC_SIZE;
			} else
				continue;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			goto out;

		/* Try merging adjacent ranges before reallocation attempt */
		if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
			sort_memory_ranges(*mem_ranges, true);
	}

	/* Reallocate memory ranges if there is no space to split ranges */
	tmem = *mem_ranges;
	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
		tmem = realloc_mem_ranges(mem_ranges);
		if (!tmem) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	/*
	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
	 *        regions are exported to save their context at the time of
	 *        crash, they should actually be backed up just like the
	 *        first 64K bytes of memory.
	 */
	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	/* create a separate program header for the backup region */
	ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
	if (ret)
		goto out;

	sort_memory_ranges(*mem_ranges, false);
out:
	if (ret)
		pr_err("Failed to setup crash memory ranges\n");
	return ret;
}

/**
 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
 *                              memory regions that should be added to the
 *                              memory reserve map to ensure the region is
 *                              protected from any mischief.
 * @mem_ranges:                 Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup reserved memory ranges\n");
	return ret;
}

/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

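	/*
	 * Walk the (sorted) exclude ranges from highest to lowest, probing
	 * the free window between each exclude range and the running upper
	 * bound (tmax) for a suitable hole.
	 */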
	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

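	/*
	 * Walk the (sorted) exclude ranges from lowest to highest, probing
	 * the free window between the running lower bound (tmin) and each
	 * exclude range for a suitable hole.
	 */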
	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * The old buffer is left untouched if reallocation fails; callers free it.
 *
 * Returns buffer on success, NULL on error.
 */
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	u64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
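	/*
	 * new_size grows by a fixed chunk rather than by 'cnt'; callers in
	 * this file only ever ask for one or two extra entries at a time.
	 */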
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}

/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!\n");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

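	/* Reserve one slot (tmp_idx) for the range count; it is filled in below */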
	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int n_mem_addr_cells, n_mem_size_cells, node;
	char path[NODE_PATH_LEN];
	int i, len, ranges, ret;
	const __be32 *prop;
	u64 base, end;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		ret = -EOVERFLOW;
		goto out;
	}
	pr_debug("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	/* Get the address & size cells */
	n_mem_addr_cells = of_n_addr_cells(dn);
	n_mem_size_cells = of_n_size_cells(dn);
	pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
		 n_mem_size_cells);

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	prop = of_get_property(dn, "reg", &len);
	if (!prop || len <= 0) {
		ret = 0;
		goto out;
	}

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
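	/* 'len' is in bytes and each cell is a 4-byte __be32, hence len >> 2 */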
	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

	for (i = 0; i < ranges; i++) {
		base = of_read_number(prop, n_mem_addr_cells);
		prop += n_mem_addr_cells;
		end = base + of_read_number(prop, n_mem_size_cells) - 1;
		prop += n_mem_size_cells;

		ret = add_usable_mem(um_info, base, end);
		if (ret)
			goto out;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		pr_debug("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property\n");
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node\n",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for the backup region as the data
	 * will be copied from the backup source, after crash, in the
	 * purgatory. But as the segment loading code doesn't handle segments
	 * without a source buffer, set up a dummy one to keep it happy for
	 * now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is set up for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
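	/*
	 * Program headers immediately follow the ELF header in the buffer
	 * prepared by crash_prepare_elf64_headers().
	 */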
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			pr_debug("Backup region offset updated to 0x%lx\n",
				 image->arch.backup_start);
			return;
		}
	}
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = headers_sz;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;
	image->elf_headers_sz = headers_sz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		of_property_read_u64(dn, "opal-base-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		of_property_read_u64(dn, "opal-entry-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols\n");
	of_node_put(dn);
	return ret;
}

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
	u64 usm_entries;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximation of the
	 * number of usable memory entries and use it for FDT size estimation.
	 */
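	/*
	 * Rough upper bound: one entry per LMB for the per-LMB range count,
	 * plus two entries (base, size) per LMB within the crashkernel region.
	 */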
	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
	return (unsigned int)(usm_entries * sizeof(u64));
}

/**
 * add_node_props - Reads node properties from device node structure and adds
 *                  them to fdt.
 * @fdt:            Flattened device tree of the kernel
 * @node_offset:    offset of the node to add a property at
 * @dn:             device node pointer
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
{
	int ret = 0;
	struct property *pp;

	if (!dn)
		return -EINVAL;

	for_each_property_of_node(dn, pp) {
		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
		if (ret < 0) {
			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
			return ret;
		}
	}
	return ret;
}

/**
 * update_cpus_node - Update cpus node of flattened device tree using of_root
 *                    device node.
 * @fdt:              Flattened device tree of the kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_cpus_node(void *fdt)
{
	struct device_node *cpus_node, *dn;
	int cpus_offset, cpus_subnode_offset, ret = 0;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
		pr_err("Malformed device tree: error reading /cpus node: %s\n",
		       fdt_strerror(cpus_offset));
		return cpus_offset;
	}

	if (cpus_offset > 0) {
		ret = fdt_del_node(fdt, cpus_offset);
		if (ret < 0) {
			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
			return -EINVAL;
		}
	}

	/* Add cpus node to fdt */
	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
	if (cpus_offset < 0) {
		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
		return -EINVAL;
	}

	/* Add cpus node properties */
	cpus_node = of_find_node_by_path("/cpus");
	ret = add_node_props(fdt, cpus_offset, cpus_node);
	of_node_put(cpus_node);
	if (ret < 0)
		return ret;

	/* Loop through all subnodes of cpus and add them to fdt */
	for_each_node_by_type(dn, "cpu") {
		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
		if (cpus_subnode_offset < 0) {
			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
			       fdt_strerror(cpus_subnode_offset));
			ret = cpus_subnode_offset;
			goto out;
		}

		ret = add_node_props(fdt, cpus_subnode_offset, dn);
		if (ret < 0)
			goto out;
	}
out:
	of_node_put(dn);
	return ret;
}

static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}

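/*
 * The DMA window properties handled below can change at runtime (for
 * example when dynamic DMA windows are configured on an LPAR), so the
 * live values from the running device tree are copied into the next
 * kernel's FDT instead of relying on what was there at boot.
 */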
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0)
			break;
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0)
			break;
	}

	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there will
 *                       be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline)
{
	struct crash_mem *umem = NULL, *rmem = NULL;
	int i, nr_ranges, ret;

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/* Update cpus nodes information to account for hotplug CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;
#undef DMA64_PROPNAME
#undef DIRECT64_PROPNAME

	/* Update memory reserve map */
	ret = get_reserved_memory_ranges(&rmem);
	if (ret)
		goto out;

	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

out:
	kfree(rmem);
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does the additional handling needed to set
 *                                 up kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}