/*
 * fs/proc/vmcore.c - interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
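
/*
 * Example (a sketch, not part of this file): a paravirtualized backend,
 * e.g. a balloon driver, could register a classifier so that unbacked
 * pages are skipped while the dump is read.  The names below are
 * hypothetical:
 *
 *      static int my_pfn_is_backed(unsigned long pfn)
 *      {
 *              return my_backend_pfn_populated(pfn) ? 1 : 0;
 *      }
 *
 *      register_oldmem_pfn_is_ram(&my_pfn_is_backed);
 *      ...
 *      unregister_oldmem_pfn_is_ram();
 */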

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Read from the old memory (oldmem) device at the given offset; the read
 * may span multiple pages. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}

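/*
 * Worked example of the loop above (assuming PAGE_SIZE == 4096): a read of
 * count = 5000 bytes at *ppos = 6000 starts at pfn 1, offset 1904.  The
 * first iteration copies PAGE_SIZE - 1904 = 2192 bytes, then offset resets
 * to 0 and the remaining 2808 bytes come from the start of pfn 2.
 */
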
/* Maps vmcore file offset to the respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
                               struct vmcore **m_ptr)
{
        struct vmcore *m;
        u64 paddr;

        list_for_each_entry(m, vc_list, list) {
                u64 start, end;
                start = m->offset;
                end = m->offset + m->size - 1;
                if (offset >= start && offset <= end) {
                        paddr = m->paddr + offset - start;
                        *m_ptr = m;
                        return paddr;
                }
        }
        *m_ptr = NULL;
        return 0;
}
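
/*
 * Resulting /proc/vmcore layout, as assembled by the __init code below:
 *
 *      +-------------------------------------+  offset 0
 *      | ELF header + program headers        |  served from elfcorebuf
 *      +-------------------------------------+  offset elfcorebuf_sz
 *      | merged note segments                |  vmcore_list entries,
 *      +-------------------------------------+  served from old memory
 *      | PT_LOAD memory chunks               |  via read_from_oldmem()
 *      +-------------------------------------+  offset vmcore_size
 */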

/* Read from the ELF header and then the crash dump.  Returns the number of
 * bytes read on success, a negative value on error.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                                size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start, nr_bytes;
        struct vmcore *curr_m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = elfcorebuf_sz - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
        if (!curr_m)
                return -EINVAL;
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        /* Calculate left bytes in current memory segment. */
        nr_bytes = (curr_m->size - (start - curr_m->paddr));
        if (tsz > nr_bytes)
                tsz = nr_bytes;

        while (buflen) {
                tmp = read_from_oldmem(buffer, tsz, &start, 1);
                if (tmp < 0)
                        return tmp;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                if (start >= (curr_m->paddr + curr_m->size)) {
                        if (curr_m->list.next == &vmcore_list)
                                return acc;     /* EOF */
                        curr_m = list_entry(curr_m->list.next,
                                                struct vmcore, list);
                        start = curr_m->paddr;
                }
                if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                        tsz = buflen;
                /* Calculate left bytes in current memory segment. */
                nr_bytes = (curr_m->size - (start - curr_m->paddr));
                if (tsz > nr_bytes)
                        tsz = nr_bytes;
        }
        return acc;
}
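
/*
 * Typical use from userspace (a sketch, not part of this file): in the
 * capture kernel, the dump is saved with a plain copy, e.g.
 *
 *      cp /proc/vmcore /var/crash/vmcore
 *
 * or filtered/compressed by a tool such as makedumpfile before writing.
 */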

static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
};

static struct vmcore * __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

/* Total vmcore size: the ELF header and program headers, plus the sum of
 * all segment sizes (p_memsz). */
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
        int i;
        u64 size;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
        int i;
        u64 size;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
        size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++) {
                size += phdr_ptr->p_memsz;
                phdr_ptr++;
        }
        return size;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr, *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        /* Note name and descriptor are 4-byte aligned. */
                        sz = sizeof(Elf64_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list.*/
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset = note_off;
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}
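
/*
 * Effect of the merge on the exported header table (a sketch):
 *
 *      before: Ehdr | PT_NOTE | ... | PT_NOTE | PT_LOAD | ... | PT_LOAD
 *      after:  Ehdr | merged PT_NOTE | PT_LOAD | ... | PT_LOAD
 *
 * e_phnum shrinks by nr_ptnote - 1; the merged note payload itself is not
 * copied into elfcorebuf but served from old memory via vmcore_list.
 */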

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                                struct list_head *vc_list)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr, *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                int j;
                void *notes_section;
                struct vmcore *new;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                nr_ptnote++;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                for (j = 0; j < max_sz; j += sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        /* Note name and descriptor are 4-byte aligned. */
                        sz = sizeof(Elf32_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }

                /* Add this contiguous chunk of notes section to vmcore list.*/
                new = get_new_element();
                if (!new) {
                        kfree(notes_section);
                        return -ENOMEM;
                }
                new->paddr = phdr_ptr->p_offset;
                new->size = real_sz;
                list_add_tail(&new->list, vc_list);
                phdr_sz += real_sz;
                kfree(notes_section);
        }

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset = note_off;
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list.  Also
 * update the offset fields of the exported program headers accordingly.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* First program header is the merged PT_NOTE header. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}
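
/*
 * Note (an assumption about the incoming header, as prepared by kexec):
 * on entry each PT_LOAD's p_offset is expected to hold the segment's
 * physical address, which is why it is captured in new->paddr before
 * p_offset is rewritten to the segment's offset in the vmcore file.
 */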

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* First program header is the merged PT_NOTE header. */
        vmcore_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
                        phdr_ptr->p_memsz; /* Note sections */

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = phdr_ptr->p_offset;
                new->size = phdr_ptr->p_memsz;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off;
                vmcore_off = vmcore_off + phdr_ptr->p_memsz;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
                                                struct list_head *vc_list)
{
        loff_t vmcore_off;
        Elf64_Ehdr *ehdr_ptr;
        struct vmcore *m;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        /* Skip Elf header and program headers. */
        vmcore_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
                                                struct list_head *vc_list)
{
        loff_t vmcore_off;
        Elf32_Ehdr *ehdr_ptr;
        struct vmcore *m;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        /* Skip Elf header and program headers. */
        vmcore_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}
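
/*
 * These offsets are exactly what map_offset_to_paddr() consults at read
 * time: each vmcore_list entry now knows both where its data lives in old
 * memory (paddr) and where it appears in the /proc/vmcore file (offset).
 */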

static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                printk(KERN_WARNING "Warning: Core image elf header is not"
                                        " sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
        if (rc < 0) {
                kfree(elfcorebuf);
                return rc;
        }

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                        &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
        return 0;
}

static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                printk(KERN_WARNING "Warning: Core image elf header is not"
                                        " sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
        if (rc < 0) {
                kfree(elfcorebuf);
                return rc;
        }

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                        &vmcore_list);
        if (rc) {
                kfree(elfcorebuf);
                return rc;
        }
        set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
        return 0;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                printk(KERN_WARNING "Warning: Core image elf header"
                                        " not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf64(elfcorebuf);
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;

                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf32(elfcorebuf);
        } else {
                printk(KERN_WARNING "Warning: Core image elf header is not"
                                        " sane\n");
                return -EINVAL;
        }
        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                printk(KERN_WARNING "Kdump: vmcore not initialized\n");
                return rc;
        }

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
module_init(vmcore_init)
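
/*
 * Note: is_vmcore_usable() succeeds only in the capture (kdump) kernel,
 * which is booted with the previous kernel's ELF core header address on
 * its command line, e.g. "elfcorehdr=0x2f000000" (address shown here for
 * illustration only).
 */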

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                remove_proc_entry(proc_vmcore->name, proc_vmcore->parent);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        kfree(elfcorebuf);
        elfcorebuf = NULL;
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);