// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
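
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * a driver whose memory must not be read as RAM from the old kernel can
 * register a callback in the kdump kernel. Only register_vmcore_cb(),
 * unregister_vmcore_cb() and the ->pfn_is_ram() signature below come from
 * this file; foo_owns_pfn() is made up.
 *
 *	static bool foo_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		// Pages owned by the device are reported as not-ram, so
 *		// reads of them return zeroes instead of stale data.
 *		return !foo_owns_pfn(pfn);
 *	}
 *
 *	static struct vmcore_cb foo_vmcore_cb = {
 *		.pfn_is_ram = foo_pfn_is_ram,
 *	};
 *
 *	// On probe in the kdump kernel:
 *	register_vmcore_cb(&foo_vmcore_cb);
 *	// On removal:
 *	unregister_vmcore_cb(&foo_vmcore_cb);
 */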

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, or a negative value on error.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
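
/*
 * Worked example of the entry size computation above (illustrative,
 * made-up note sizes): (x + 3) & ~3 rounds x up to the 4-byte alignment
 * used for core file notes, so a note with n_namesz == 5 ("CORE") and
 * n_descsz == 336 consumes
 *
 *	sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *	= 12 + 8 + 336 = 356 bytes,
 *
 * and real_sz accumulates these entry sizes up to the first empty note.
 */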

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
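
/*
 * Resulting file layout after merging (illustrative):
 *
 *	+--------------------+ offset 0
 *	| Elf64_Ehdr         |
 *	| merged PT_NOTE     |  p_offset = roundup(note_off, PAGE_SIZE)
 *	| remaining phdrs    |  (PT_LOAD entries, unmodified here)
 *	+--------------------+ roundup(*elfsz, PAGE_SIZE)
 *	| note segment       |  *notes_sz bytes, backed by *notes_buf
 *	+--------------------+
 *	| memory chunks      |  laid out by process_ptload_program_headers
 *	+--------------------+
 */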

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of the PT_NOTE program headers has the actual ELF note
 * segment size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
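
/*
 * Worked example with illustrative numbers, assuming 4 KiB pages: for a
 * two-page header buffer (elfsz == 8192), a one-page note segment
 * (elfnotes_sz == 4096) and chunks of 16384 and 8192 bytes, the chunks
 * are assigned file offsets 12288 and 28672 respectively; dump data
 * simply follows the headers in list order.
 */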

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
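
/*
 * Illustrative sketch of a caller (hypothetical "foo" driver): fill a
 * struct vmcoredd_data with a dump name, the payload size and a collect
 * callback, then hand it to vmcore_add_device_dump() in the kdump kernel.
 * Only the vmcoredd_data fields used below come from this API;
 * foo_read_dump() and FOO_DUMP_SIZE are made up.
 *
 *	static int foo_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// Copy at most data->size bytes of device state into buf.
 *		return foo_read_dump(buf, data->size);
 *	}
 *
 *	static struct vmcoredd_data foo_dump_data = {
 *		.dump_name = "FOO_DUMP",
 *		.size = FOO_DUMP_SIZE,
 *		.vmcoredd_callback = foo_collect,
 *	};
 *
 *	err = vmcore_add_device_dump(&foo_dump_data);
 */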
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}