1 /*
2  * linux/fs/binfmt_elf.c
3  *
4  * These are the functions used to load ELF format executables as used
5  * on SVr4 machines.  Information on the format may be found in the book
6  * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7  * Tools".
8  *
9  * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10  */
11 
12 #include <linux/module.h>
13 
14 #include <linux/fs.h>
15 #include <linux/stat.h>
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp_lock.h>
34 #include <linux/compiler.h>
35 #include <linux/highmem.h>
36 
37 #include <asm/uaccess.h>
38 #include <asm/param.h>
39 #include <asm/pgalloc.h>
40 
41 #define DLINFO_ITEMS 13
42 
43 #include <linux/elf.h>
44 
45 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46 static int load_elf_library(struct file*);
47 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
48 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
49 extern void dump_thread(struct pt_regs *, struct user *);
50 
51 #ifndef elf_addr_t
52 #define elf_addr_t unsigned long
53 #define elf_caddr_t char *
54 #endif
55 
56 /*
57  * If we don't support core dumping, then supply a NULL so we
58  * don't even try.
59  */
60 #ifdef USE_ELF_CORE_DUMP
61 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
62 #else
63 #define elf_core_dump	NULL
64 #endif
65 
66 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
67 # define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
68 #else
69 # define ELF_MIN_ALIGN	PAGE_SIZE
70 #endif
71 
72 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
73 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
74 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
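/*
 * Worked example, assuming ELF_MIN_ALIGN == 0x1000 (4K pages):
 *	ELF_PAGESTART(0x08048123)  == 0x08048000
 *	ELF_PAGEOFFSET(0x08048123) == 0x123
 *	ELF_PAGEALIGN(0x08048123)  == 0x08049000
 */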
75 
76 static struct linux_binfmt elf_format = {
77 	NULL, THIS_MODULE, load_elf_binary, load_elf_library, elf_core_dump, ELF_EXEC_PAGESIZE
78 };
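/*
 * The positional initializers above fill in, in order: next, module,
 * load_binary, load_shlib, core_dump and min_coredump of struct
 * linux_binfmt.
 */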
79 
80 #define BAD_ADDR(x)	((unsigned long)(x) >= TASK_SIZE)
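/*
 * do_mmap()/do_brk() return either a user address below TASK_SIZE or a
 * negative errno cast to unsigned long, so BAD_ADDR() catches both the
 * error case and anything that would land outside the user address range.
 */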
81 
static int set_brk(unsigned long start, unsigned long end)
83 {
84 	start = ELF_PAGEALIGN(start);
85 	end = ELF_PAGEALIGN(end);
86 	if (end > start) {
87 		unsigned long addr;
88 		down_write(&current->mm->mmap_sem);
89 		addr = do_brk(start, end - start);
90 		up_write(&current->mm->mmap_sem);
91 		if (BAD_ADDR(addr))
92 			return addr;
93 	}
94 	current->mm->start_brk = current->mm->brk = end;
95 	return 0;
96 }
97 
98 
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  These would
   otherwise contain junk from the file that should
   not be in memory */
103 
104 
static void padzero(unsigned long elf_bss)
106 {
107 	unsigned long nbyte;
108 
109 	nbyte = ELF_PAGEOFFSET(elf_bss);
110 	if (nbyte) {
111 		nbyte = ELF_MIN_ALIGN - nbyte;
112 		clear_user((void *) elf_bss, nbyte);
113 	}
114 }
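/*
 * For illustration, with ELF_MIN_ALIGN == 0x1000: padzero(0x0804a123)
 * clears the 0xedd bytes from 0x0804a123 up to the 0x0804b000 page
 * boundary, so the tail of the last file-backed page reads as zeroes.
 */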
115 
116 static elf_addr_t *
create_elf_tables(char *p, int argc, int envc,
118 		  struct elfhdr * exec,
119 		  unsigned long load_addr,
120 		  unsigned long load_bias,
121 		  unsigned long interp_load_addr, int ibcs)
122 {
123 	elf_caddr_t *argv;
124 	elf_caddr_t *envp;
125 	elf_addr_t *sp, *csp;
126 	char *k_platform, *u_platform;
127 	long hwcap;
128 	size_t platform_len = 0;
129 	size_t len;
130 
131 	/*
132 	 * Get hold of platform and hardware capabilities masks for
133 	 * the machine we are running on.  In some cases (Sparc),
134 	 * this info is impossible to get, in others (i386) it is
135 	 * merely difficult.
136 	 */
137 
138 	hwcap = ELF_HWCAP;
139 	k_platform = ELF_PLATFORM;
140 
141 	if (k_platform) {
142 		platform_len = strlen(k_platform) + 1;
143 		u_platform = p - platform_len;
144 		__copy_to_user(u_platform, k_platform, platform_len);
145 	} else
146 		u_platform = p;
147 
148 #if defined(__i386__) && defined(CONFIG_SMP)
149 	/*
150 	 * In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
151 	 * by the processes running on the same package. One thing we can do
152 	 * is to shuffle the initial stack for them.
153 	 *
154 	 * The conditionals here are unneeded, but kept in to make the
155 	 * code behaviour the same as pre change unless we have hyperthreaded
156 	 * processors. This keeps Mr Marcelo Person happier but should be
157 	 * removed for 2.5
158 	 */
159 
160 	if(smp_num_siblings > 1)
161 		u_platform = u_platform - ((current->pid % 64) << 7);
162 #endif
163 
164 	/*
165 	 * Force 16 byte _final_ alignment here for generality.
166 	 */
167 	sp = (elf_addr_t *)(~15UL & (unsigned long)(u_platform));
168 	csp = sp;
169 	csp -= (1+DLINFO_ITEMS)*2 + (k_platform ? 2 : 0);
170 #ifdef DLINFO_ARCH_ITEMS
171 	csp -= DLINFO_ARCH_ITEMS*2;
172 #endif
173 	csp -= envc+1;
174 	csp -= argc+1;
175 	csp -= (!ibcs ? 3 : 1);	/* argc itself */
176 	if ((unsigned long)csp & 15UL)
177 		sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);
178 
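	/*
	 * Rough sketch of the stack image assembled below, from the final
	 * stack pointer upwards (the argument/environment strings and the
	 * platform string copied above live at the higher addresses):
	 *
	 *	argc
	 *	&argv[0], &envp[0]		(native, non-ibcs binaries only)
	 *	argv[0] ... argv[argc-1], NULL
	 *	envp[0] ... envp[envc-1], NULL
	 *	auxv: (id, value) pairs, terminated by AT_NULL
	 */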
179 	/*
180 	 * Put the ELF interpreter info on the stack
181 	 */
182 #define NEW_AUX_ENT(nr, id, val) \
183 	  __put_user ((id), sp+(nr*2)); \
184 	  __put_user ((val), sp+(nr*2+1)); \
185 
186 	sp -= 2;
187 	NEW_AUX_ENT(0, AT_NULL, 0);
188 	if (k_platform) {
189 		sp -= 2;
190 		NEW_AUX_ENT(0, AT_PLATFORM, (elf_addr_t)(unsigned long) u_platform);
191 	}
192 	sp -= DLINFO_ITEMS*2;
193 	NEW_AUX_ENT( 0, AT_HWCAP, hwcap);
194 	NEW_AUX_ENT( 1, AT_PAGESZ, ELF_EXEC_PAGESIZE);
195 	NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC);
196 	NEW_AUX_ENT( 3, AT_PHDR, load_addr + exec->e_phoff);
197 	NEW_AUX_ENT( 4, AT_PHENT, sizeof (struct elf_phdr));
198 	NEW_AUX_ENT( 5, AT_PHNUM, exec->e_phnum);
199 	NEW_AUX_ENT( 6, AT_BASE, interp_load_addr);
200 	NEW_AUX_ENT( 7, AT_FLAGS, 0);
201 	NEW_AUX_ENT( 8, AT_ENTRY, load_bias + exec->e_entry);
202 	NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid);
203 	NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid);
204 	NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid);
205 	NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid);
206 #ifdef ARCH_DLINFO
207 	/*
208 	 * ARCH_DLINFO must come last so platform specific code can enforce
209 	 * special alignment requirements on the AUXV if necessary (eg. PPC).
210 	 */
211 	ARCH_DLINFO;
212 #endif
213 #undef NEW_AUX_ENT
214 
215 	sp -= envc+1;
216 	envp = (elf_caddr_t *) sp;
217 	sp -= argc+1;
218 	argv = (elf_caddr_t *) sp;
219 	if (!ibcs) {
220 		__put_user((elf_addr_t)(unsigned long) envp,--sp);
221 		__put_user((elf_addr_t)(unsigned long) argv,--sp);
222 	}
223 
224 	__put_user((elf_addr_t)argc,--sp);
225 	current->mm->arg_start = current->mm->arg_end = (unsigned long) p;
226 	while (argc-->0) {
227 		__put_user((elf_caddr_t)(unsigned long)p,argv++);
228 		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
229 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
230 			return NULL;
231 		p += len;
232 	}
233 	__put_user(NULL, argv);
234 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
235 	while (envc-->0) {
236 		__put_user((elf_caddr_t)(unsigned long)p,envp++);
237 		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
238 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
239 			return NULL;
240 		p += len;
241 	}
242 	__put_user(NULL, envp);
243 	current->mm->env_end = (unsigned long) p;
244 	return sp;
245 }
246 
247 #ifndef elf_map
248 
249 static inline unsigned long
elf_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
251 {
252 	unsigned long map_addr;
253 
254 	down_write(&current->mm->mmap_sem);
255 	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
256 			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
257 			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
258 	up_write(&current->mm->mmap_sem);
259 	return(map_addr);
260 }
261 
262 #endif /* !elf_map */
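/*
 * Note on the arithmetic above: mmap() wants a page-aligned address and
 * file offset, so elf_map() maps from ELF_PAGESTART(addr) and compensates
 * by adding the in-page offset of p_vaddr to the length and subtracting it
 * from p_offset.  Illustrative numbers only: p_vaddr == 0x08048120,
 * p_offset == 0x120, p_filesz == 0x5000 maps 0x5120 bytes from file offset
 * 0 at address 0x08048000.  This works because the ELF spec requires
 * p_vaddr and p_offset of a loadable segment to be congruent modulo the
 * page size.
 */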
263 
264 /* This is much more generalized than the library routine read function,
265    so we keep this separate.  Technically the library read function
266    is only provided so that we can read a.out libraries that have
267    an ELF header */
268 
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
270 				     struct file * interpreter,
271 				     unsigned long *interp_load_addr)
272 {
273 	struct elf_phdr *elf_phdata;
274 	struct elf_phdr *eppnt;
275 	unsigned long load_addr = 0;
276 	int load_addr_set = 0;
277 	unsigned long last_bss = 0, elf_bss = 0;
278 	unsigned long error = ~0UL;
279 	int retval, i, size;
280 
281 	/* First of all, some simple consistency checks */
282 	if (interp_elf_ex->e_type != ET_EXEC &&
283 	    interp_elf_ex->e_type != ET_DYN)
284 		goto out;
285 	if (!elf_check_arch(interp_elf_ex))
286 		goto out;
287 	if (!interpreter->f_op || !interpreter->f_op->mmap)
288 		goto out;
289 
290 	/*
291 	 * If the size of this structure has changed, then punt, since
292 	 * we will be doing the wrong thing.
293 	 */
294 	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
295 		goto out;
296 
297 	if (interp_elf_ex->e_phnum < 1 ||
298 	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
299 		goto out;
300 
301 	/* Now read in all of the header information */
302 
303 	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
304 	if (size > ELF_MIN_ALIGN)
305 		goto out;
306 	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
307 	if (!elf_phdata)
308 		goto out;
309 
310 	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
311 	error = -EIO;
312 	if (retval != size) {
313 		if (retval < 0)
314 			error = retval;
315 		goto out_close;
316 	}
317 
318 	eppnt = elf_phdata;
319 	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
320 	  if (eppnt->p_type == PT_LOAD) {
321 	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
322 	    int elf_prot = 0;
323 	    unsigned long vaddr = 0;
324 	    unsigned long k, map_addr;
325 
326 	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
327 	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
328 	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
329 	    vaddr = eppnt->p_vaddr;
330 	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
331 	    	elf_type |= MAP_FIXED;
332 
333 	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
334 	    if (BAD_ADDR(map_addr))
335 	    	goto out_close;
336 
337 	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
338 		load_addr = map_addr - ELF_PAGESTART(vaddr);
339 		load_addr_set = 1;
340 	    }
341 
	    /*
	     * Check to see if the segment's size will overflow the
	     * allowed task size. Note that p_filesz must always be
	     * <= p_memsz so it is only necessary to check p_memsz.
	     */
347 	    k = load_addr + eppnt->p_vaddr;
348 	    if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
349 		eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
350 	        error = -ENOMEM;
351 		goto out_close;
352 	    }
353 
354 	    /*
355 	     * Find the end of the file mapping for this phdr, and keep
356 	     * track of the largest address we see for this.
357 	     */
358 	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
359 	    if (k > elf_bss)
360 		elf_bss = k;
361 
362 	    /*
363 	     * Do the same thing for the memory mapping - between
364 	     * elf_bss and last_bss is the bss section.
365 	     */
366 	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
367 	    if (k > last_bss)
368 		last_bss = k;
369 	  }
370 	}
371 
372 	/* Now use mmap to map the library into memory. */
373 
374 	/*
375 	 * Now fill out the bss section.  First pad the last page up
376 	 * to the page boundary, and then perform a mmap to make sure
377 	 * that there are zero-mapped pages up to and including the
378 	 * last bss page.
379 	 */
380 	padzero(elf_bss);
381 	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */
382 
383 	/* Map the last of the bss segment */
384 	if (last_bss > elf_bss) {
385 		down_write(&current->mm->mmap_sem);
386 		error = do_brk(elf_bss, last_bss - elf_bss);
387 		up_write(&current->mm->mmap_sem);
388 		if (BAD_ADDR(error))
389 			goto out_close;
390 	}
391 
392 	*interp_load_addr = load_addr;
393 	/*
394 	 * XXX: is everything deallocated properly if this happens
395 	 * to be ~0UL (that is, we succeeded, but the header is broken
396 	 * and thus the caller will think that we failed)? We'd better
397 	 * switch to out-of-band error reporting.
398 	 */
399 	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
400 
401 out_close:
402 	kfree(elf_phdata);
403 out:
404 	return error;
405 }
406 
static unsigned long load_aout_interp(struct exec * interp_ex,
408 			     struct file * interpreter)
409 {
410 	unsigned long text_data, elf_entry = ~0UL;
411 	char * addr;
412 	loff_t offset;
413 
414 	current->mm->end_code = interp_ex->a_text;
415 	text_data = interp_ex->a_text + interp_ex->a_data;
416 	current->mm->end_data = text_data;
417 	current->mm->brk = interp_ex->a_bss + text_data;
418 
419 	switch (N_MAGIC(*interp_ex)) {
420 	case OMAGIC:
421 		offset = 32;
422 		addr = (char *) 0;
423 		break;
424 	case ZMAGIC:
425 	case QMAGIC:
426 		offset = N_TXTOFF(*interp_ex);
427 		addr = (char *) N_TXTADDR(*interp_ex);
428 		break;
429 	default:
430 		goto out;
431 	}
432 
433 	down_write(&current->mm->mmap_sem);
434 	do_brk(0, text_data);
435 	up_write(&current->mm->mmap_sem);
436 
437 	if (!interpreter->f_op || !interpreter->f_op->read)
438 		goto out;
439 	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
440 		goto out;
441 	flush_icache_range((unsigned long)addr,
442 	                   (unsigned long)addr + text_data);
443 
444 	down_write(&current->mm->mmap_sem);
445 	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
446 		interp_ex->a_bss);
447 	up_write(&current->mm->mmap_sem);
448 
449 	elf_entry = interp_ex->a_entry;
450 
451 out:
452 	return elf_entry;
453 }
454 
455 /*
456  * These are the functions used to load ELF style executables and shared
457  * libraries.  There is no binary dependent code anywhere else.
458  */
459 
460 #define INTERPRETER_NONE 0
461 #define INTERPRETER_AOUT 1
462 #define INTERPRETER_ELF 2
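/*
 * When a PT_INTERP segment is present, interpreter_type is first set to the
 * full INTERPRETER_ELF|INTERPRETER_AOUT mask and then narrowed once the
 * interpreter's headers have been read: if neither format matches the exec
 * fails with -ELIBBAD, and if both appear to match the ELF interpretation
 * wins.
 */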
463 
464 
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
466 {
467 	struct file *interpreter = NULL; /* to shut gcc up */
468  	unsigned long load_addr = 0, load_bias = 0;
469 	int load_addr_set = 0;
470 	char * elf_interpreter = NULL;
471 	unsigned int interpreter_type = INTERPRETER_NONE;
472 	unsigned char ibcs2_interpreter = 0;
473 	unsigned long error;
474 	struct elf_phdr * elf_ppnt, *elf_phdata;
475 	unsigned long elf_bss, k, elf_brk;
476 	int elf_exec_fileno;
477 	int retval, i;
478 	unsigned int size;
479 	unsigned long elf_entry, interp_load_addr = 0;
480 	unsigned long start_code, end_code, start_data, end_data;
481 	unsigned long reloc_func_desc = 0;
482 	struct elfhdr elf_ex;
483 	struct elfhdr interp_elf_ex;
484   	struct exec interp_ex;
485 	char passed_fileno[6];
486 	struct files_struct *files;
487 
488 	/* Get the exec-header */
489 	elf_ex = *((struct elfhdr *) bprm->buf);
490 
491 	retval = -ENOEXEC;
492 	/* First of all, some simple consistency checks */
493 	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
494 		goto out;
495 
496 	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
497 		goto out;
498 	if (!elf_check_arch(&elf_ex))
499 		goto out;
500 	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
501 		goto out;
502 
503 	/* Now read in all of the header information */
504 
505 	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
506 		goto out;
507 	if (elf_ex.e_phnum < 1 ||
508 	    elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
509 		goto out;
510 	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
511 	retval = -ENOMEM;
512 	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
513 	if (!elf_phdata)
514 		goto out;
515 
516 	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
517 	if (retval != size) {
518 		if (retval >= 0)
519 			retval = -EIO;
520 		goto out_free_ph;
521 	}
522 
523 	files = current->files;		/* Refcounted so ok */
524 	retval = unshare_files();
525 	if (retval < 0)
526 		goto out_free_ph;
527 	if (files == current->files) {
528 		put_files_struct(files);
529 		files = NULL;
530 	}
531 
532 	/* exec will make our files private anyway, but for the a.out
533 	   loader stuff we need to do it earlier */
534 
535 	retval = get_unused_fd();
536 	if (retval < 0)
537 		goto out_free_fh;
538 	get_file(bprm->file);
539 	fd_install(elf_exec_fileno = retval, bprm->file);
540 
541 	elf_ppnt = elf_phdata;
542 	elf_bss = 0;
543 	elf_brk = 0;
544 
545 	start_code = ~0UL;
546 	end_code = 0;
547 	start_data = 0;
548 	end_data = 0;
549 
550 	for (i = 0; i < elf_ex.e_phnum; i++) {
551 		if (elf_ppnt->p_type == PT_INTERP) {
552 			/* This is the program interpreter used for
553 			 * shared libraries - for now assume that this
554 			 * is an a.out format binary
555 			 */
556 
557 			retval = -ENOEXEC;
558 			if (elf_ppnt->p_filesz > PATH_MAX ||
559 			    elf_ppnt->p_filesz < 2)
560 				goto out_free_file;
561 
562 			retval = -ENOMEM;
563 			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
564 							   GFP_KERNEL);
565 			if (!elf_interpreter)
566 				goto out_free_file;
567 
568 			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
569 					   elf_interpreter,
570 					   elf_ppnt->p_filesz);
571 			if (retval != elf_ppnt->p_filesz) {
572 				if (retval >= 0)
573 					retval = -EIO;
574 				goto out_free_interp;
575 			}
576 			/* make sure path is NULL terminated */
577 			retval = -ENOEXEC;
578 			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
579 				goto out_free_interp;
580 
581 			/* If the program interpreter is one of these two,
582 			 * then assume an iBCS2 image. Otherwise assume
583 			 * a native linux image.
584 			 */
585 			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
586 			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
587 				ibcs2_interpreter = 1;
588 #if 0
589 			printk("Using ELF interpreter %s\n", elf_interpreter);
590 #endif
591 
592 			SET_PERSONALITY(elf_ex, ibcs2_interpreter);
593 
594 			interpreter = open_exec(elf_interpreter);
595 			retval = PTR_ERR(interpreter);
596 			if (IS_ERR(interpreter))
597 				goto out_free_interp;
598 			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
599 			if (retval != BINPRM_BUF_SIZE) {
600 				if (retval >= 0)
601 					retval = -EIO;
602 				goto out_free_dentry;
603 			}
604 
605 			/* Get the exec headers */
606 			interp_ex = *((struct exec *) bprm->buf);
607 			interp_elf_ex = *((struct elfhdr *) bprm->buf);
608 			break;
609 		}
610 		elf_ppnt++;
611 	}
612 
613 	/* Some simple consistency checks for the interpreter */
614 	if (elf_interpreter) {
615 		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
616 
617 		/* Now figure out which format our binary is */
618 		if ((N_MAGIC(interp_ex) != OMAGIC) &&
619 		    (N_MAGIC(interp_ex) != ZMAGIC) &&
620 		    (N_MAGIC(interp_ex) != QMAGIC))
621 			interpreter_type = INTERPRETER_ELF;
622 
623 		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
624 			interpreter_type &= ~INTERPRETER_ELF;
625 
626 		retval = -ELIBBAD;
627 		if (!interpreter_type)
628 			goto out_free_dentry;
629 
630 		/* Make sure only one type was selected */
631 		if ((interpreter_type & INTERPRETER_ELF) &&
632 		     interpreter_type != INTERPRETER_ELF) {
633 	     		// FIXME - ratelimit this before re-enabling
634 			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
635 			interpreter_type = INTERPRETER_ELF;
636 		}
637 		/* Verify the interpreter has a valid arch */
638 		if ((interpreter_type == INTERPRETER_ELF) &&
639 		    !elf_check_arch(&interp_elf_ex))
640 			goto out_free_dentry;
641 	} else {
642 		/* Executables without an interpreter also need a personality  */
643 		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
644 	}
645 
646 	if (BAD_ADDR(elf_ex.e_entry)) {
647 		retval = -ENOEXEC;
648 		goto out_free_dentry;
649 	}
650 
651 	/* OK, we are done with that, now set up the arg stuff,
652 	   and then start this sucker up */
653 
654 	if (!bprm->sh_bang) {
655 		char * passed_p;
656 
657 		if (interpreter_type == INTERPRETER_AOUT) {
658 		  sprintf(passed_fileno, "%d", elf_exec_fileno);
659 		  passed_p = passed_fileno;
660 
661 		  if (elf_interpreter) {
662 		    retval = copy_strings_kernel(1,&passed_p,bprm);
663 			if (retval)
664 				goto out_free_dentry;
665 		    bprm->argc++;
666 		  }
667 		}
668 	}
669 
670 	/* Flush all traces of the currently running executable */
671 	retval = flush_old_exec(bprm);
672 	if (retval)
673 		goto out_free_dentry;
674 
675 	/* Discard our unneeded old files struct */
676 	if (files) {
677 		steal_locks(files);
678 		put_files_struct(files);
679 		files = NULL;
680 	}
681 
682 	/* OK, This is the point of no return */
683 	current->mm->start_data = 0;
684 	current->mm->end_data = 0;
685 	current->mm->end_code = 0;
686 	current->mm->mmap = NULL;
687 	current->flags &= ~PF_FORKNOEXEC;
688 	elf_entry = (unsigned long) elf_ex.e_entry;
689 
690 	/* Do this so that we can load the interpreter, if need be.  We will
691 	   change some of these later */
692 	current->mm->rss = 0;
693 	retval = setup_arg_pages(bprm);
694 	if (retval < 0) {
695 		send_sig(SIGKILL, current, 0);
696 		return retval;
697 	}
698 
699 	current->mm->start_stack = bprm->p;
700 
701 	/* Now we do a little grungy work by mmaping the ELF image into
702 	   the correct location in memory.  At this point, we assume that
703 	   the image should be loaded at fixed address, not at a variable
704 	   address. */
705 
706 	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
707 		int elf_prot = 0, elf_flags;
708 		unsigned long vaddr;
709 
710 		if (elf_ppnt->p_type != PT_LOAD)
711 			continue;
712 
713 		if (unlikely (elf_brk > elf_bss)) {
714 			unsigned long nbyte;
715 
716 			/* There was a PT_LOAD segment with p_memsz > p_filesz
717 			   before this one. Map anonymous pages, if needed,
718 			   and clear the area.  */
719 			retval = set_brk (elf_bss + load_bias,
720 					  elf_brk + load_bias);
721 			if (retval) {
722 				send_sig(SIGKILL, current, 0);
723 				goto out_free_dentry;
724 			}
725 			nbyte = ELF_PAGEOFFSET(elf_bss);
726 			if (nbyte) {
727 				nbyte = ELF_MIN_ALIGN - nbyte;
728 				if (nbyte > elf_brk - elf_bss)
729 					nbyte = elf_brk - elf_bss;
730 				clear_user((void *) elf_bss + load_bias, nbyte);
731 			}
732 		}
733 
734 		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
735 		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
736 		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
737 
738 		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
739 
740 		vaddr = elf_ppnt->p_vaddr;
741 		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
742 			elf_flags |= MAP_FIXED;
743 		} else if (elf_ex.e_type == ET_DYN) {
744 			/* Try and get dynamic programs out of the way of the default mmap
745 			   base, as well as whatever program they might try to exec.  This
746 		           is because the brk will follow the loader, and is not movable.  */
747 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
748 		}
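		/*
		 * Illustrative note: a typical ET_DYN executable has its first
		 * PT_LOAD at p_vaddr 0, so load_bias starts out as simply
		 * ELF_PAGESTART(ELF_ET_DYN_BASE).  Once the first segment has
		 * actually been mapped, load_bias is corrected below to
		 * wherever the mapping really ended up.
		 */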
749 
750 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
751 		if (BAD_ADDR(error)) {
752 			send_sig(SIGKILL, current, 0);
753 			goto out_free_dentry;
754 		}
755 
756 		if (!load_addr_set) {
757 			load_addr_set = 1;
758 			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
759 			if (elf_ex.e_type == ET_DYN) {
760 				load_bias += error -
761 				             ELF_PAGESTART(load_bias + vaddr);
762 				load_addr += load_bias;
763 				reloc_func_desc = load_addr;
764 			}
765 		}
766 		k = elf_ppnt->p_vaddr;
767 		if (k < start_code) start_code = k;
768 		if (start_data < k) start_data = k;
769 
		/*
		 * Check to see if the segment's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
775 		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
776 		    elf_ppnt->p_memsz > TASK_SIZE ||
777 		    TASK_SIZE - elf_ppnt->p_memsz < k) {
778 			/* set_brk can never work.  Avoid overflows.  */
779 			send_sig(SIGKILL, current, 0);
780 			goto out_free_dentry;
781 		}
782 
783 		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
784 
785 		if (k > elf_bss)
786 			elf_bss = k;
787 		if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
788 			end_code = k;
789 		if (end_data < k)
790 			end_data = k;
791 		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
792 		if (k > elf_brk)
793 			elf_brk = k;
794 	}
795 
796 	elf_entry += load_bias;
797 	elf_bss += load_bias;
798 	elf_brk += load_bias;
799 	start_code += load_bias;
800 	end_code += load_bias;
801 	start_data += load_bias;
802 	end_data += load_bias;
803 
804 	/* Calling set_brk effectively mmaps the pages that we need
805 	 * for the bss and break sections.  We must do this before
806 	 * mapping in the interpreter, to make sure it doesn't wind
807 	 * up getting placed where the bss needs to go.
808 	 */
809 	retval = set_brk(elf_bss, elf_brk);
810 	if (retval) {
811 		send_sig(SIGKILL, current, 0);
812 		goto out_free_dentry;
813 	}
814 	padzero(elf_bss);
815 
816 	if (elf_interpreter) {
817 		if (interpreter_type == INTERPRETER_AOUT)
818 			elf_entry = load_aout_interp(&interp_ex,
819 						     interpreter);
820 		else
821 			elf_entry = load_elf_interp(&interp_elf_ex,
822 						    interpreter,
823 						    &interp_load_addr);
824 		if (BAD_ADDR(elf_entry)) {
825 	     		// FIXME - ratelimit this before re-enabling
826 			// printk(KERN_ERR "Unable to load interpreter %.128s\n",
827 			//        elf_interpreter);
828 
829 			force_sig(SIGSEGV, current);
830 			retval = IS_ERR((void *)elf_entry) ?
831 					(int)elf_entry : -EINVAL;
832 			goto out_free_dentry;
833 		}
834 		reloc_func_desc = interp_load_addr;
835 
836 		allow_write_access(interpreter);
837 		fput(interpreter);
838 		kfree(elf_interpreter);
839 	} else {
840 		if (BAD_ADDR(elf_entry)) {
841 			force_sig(SIGSEGV, current);
842 			retval = -EINVAL;
843 			goto out_free_dentry;
844 		}
845 	}
846 
847 	kfree(elf_phdata);
848 
849 	if (interpreter_type != INTERPRETER_AOUT)
850 		sys_close(elf_exec_fileno);
851 
852 	set_binfmt(&elf_format);
853 
854 	compute_creds(bprm);
855 	current->flags &= ~PF_FORKNOEXEC;
856 	bprm->p = (unsigned long)
857 	  create_elf_tables((char *)bprm->p,
858 			bprm->argc,
859 			bprm->envc,
860 			&elf_ex,
861 			load_addr, load_bias,
862 			interp_load_addr,
863 			(interpreter_type == INTERPRETER_AOUT ? 0 : 1));
864 	/* N.B. passed_fileno might not be initialized? */
865 	if (interpreter_type == INTERPRETER_AOUT)
866 		current->mm->arg_start += strlen(passed_fileno) + 1;
867 	current->mm->start_brk = current->mm->brk = elf_brk;
868 	current->mm->end_code = end_code;
869 	current->mm->start_code = start_code;
870 	current->mm->start_data = start_data;
871 	current->mm->end_data = end_data;
872 	current->mm->start_stack = bprm->p;
873 
874 #if 0
875 	printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
876 	printk("(end_code) %lx\n" , (long) current->mm->end_code);
877 	printk("(start_code) %lx\n" , (long) current->mm->start_code);
878 	printk("(start_data) %lx\n" , (long) current->mm->start_data);
879 	printk("(end_data) %lx\n" , (long) current->mm->end_data);
880 	printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
881 	printk("(brk) %lx\n" , (long) current->mm->brk);
882 #endif
883 
884 	if (current->personality & MMAP_PAGE_ZERO) {
885 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
886 		   and some applications "depend" upon this behavior.
887 		   Since we do not have the power to recompile these, we
888 		   emulate the SVr4 behavior.  Sigh.  */
889 		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
890 		down_write(&current->mm->mmap_sem);
891 		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
892 				MAP_FIXED | MAP_PRIVATE, 0);
893 		up_write(&current->mm->mmap_sem);
894 	}
895 
896 #ifdef ELF_PLAT_INIT
897 	/*
898 	 * The ABI may specify that certain registers be set up in special
899 	 * ways (on i386 %edx is the address of a DT_FINI function, for
900 	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
901 	 * that the e_entry field is the address of the function descriptor
902 	 * for the startup routine, rather than the address of the startup
903 	 * routine itself.  This macro performs whatever initialization to
904 	 * the regs structure is required as well as any relocations to the
905 	 * function descriptor entries when executing dynamically linked apps.
906 	 */
907 	ELF_PLAT_INIT(regs, reloc_func_desc);
908 #endif
909 
910 	start_thread(regs, elf_entry, bprm->p);
911 	if (current->ptrace & PT_PTRACED)
912 		send_sig(SIGTRAP, current, 0);
913 	retval = 0;
914 out:
915 	return retval;
916 
917 	/* error cleanup */
918 out_free_dentry:
919 	allow_write_access(interpreter);
920 	if (interpreter)
921 		fput(interpreter);
922 out_free_interp:
923 	if (elf_interpreter)
924 		kfree(elf_interpreter);
925 out_free_file:
926 	sys_close(elf_exec_fileno);
927 out_free_fh:
928 	if (files) {
929 		put_files_struct(current->files);
930 		current->files = files;
931 	}
932 out_free_ph:
933 	kfree(elf_phdata);
934 	goto out;
935 }
936 
937 /* This is really simpleminded and specialized - we are loading an
938    a.out library that is given an ELF header. */
939 
static int load_elf_library(struct file *file)
941 {
942 	struct elf_phdr *elf_phdata;
943 	struct elf_phdr *eppnt;
944 	unsigned long elf_bss, bss, len;
945 	int retval, error, i, j;
946 	struct elfhdr elf_ex;
947 
948 	error = -ENOEXEC;
949 	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
950 	if (retval != sizeof(elf_ex))
951 		goto out;
952 
953 	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
954 		goto out;
955 
956 	/* First of all, some simple consistency checks */
957 	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
958 	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
959 		goto out;
960 
961 	/* Now read in all of the header information */
962 
963 	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
964 	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
965 
966 	error = -ENOMEM;
967 	elf_phdata = kmalloc(j, GFP_KERNEL);
968 	if (!elf_phdata)
969 		goto out;
970 
971 	eppnt = elf_phdata;
972 	error = -ENOEXEC;
973 	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
974 	if (retval != j)
975 		goto out_free_ph;
976 
977 	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
978 		if ((eppnt + i)->p_type == PT_LOAD) j++;
979 	if (j != 1)
980 		goto out_free_ph;
981 
982 	while (eppnt->p_type != PT_LOAD)
983 		eppnt++;
984 
985 	/* Now use mmap to map the library into memory. */
986 	down_write(&current->mm->mmap_sem);
987 	error = do_mmap(file,
988 			ELF_PAGESTART(eppnt->p_vaddr),
989 			(eppnt->p_filesz +
990 			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
991 			PROT_READ | PROT_WRITE | PROT_EXEC,
992 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
993 			(eppnt->p_offset -
994 			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
995 	up_write(&current->mm->mmap_sem);
996 	if (error != ELF_PAGESTART(eppnt->p_vaddr))
997 		goto out_free_ph;
998 
999 	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1000 	padzero(elf_bss);
1001 
1002 	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
1003 	bss = eppnt->p_memsz + eppnt->p_vaddr;
1004 	if (bss > len) {
1005 		down_write(&current->mm->mmap_sem);
1006 		do_brk(len, bss - len);
1007 		up_write(&current->mm->mmap_sem);
1008 	}
1009 	error = 0;
1010 
1011 out_free_ph:
1012 	kfree(elf_phdata);
1013 out:
1014 	return error;
1015 }
1016 
1017 /*
1018  * Note that some platforms still use traditional core dumps and not
1019  * the ELF core dump.  Each platform can select it as appropriate.
1020  */
1021 #ifdef USE_ELF_CORE_DUMP
1022 
1023 /*
1024  * ELF core dumper
1025  *
1026  * Modelled on fs/exec.c:aout_core_dump()
1027  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1028  */
1029 /*
1030  * These are the only things you should do on a core-file: use only these
1031  * functions to write out all the necessary info.
1032  */
static int dump_write(struct file *file, const void *addr, int nr)
1034 {
1035 	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1036 }
1037 
static int dump_seek(struct file *file, off_t off)
1039 {
1040 	if (file->f_op->llseek) {
1041 		if (file->f_op->llseek(file, off, 0) != off)
1042 			return 0;
1043 	} else
1044 		file->f_pos = off;
1045 	return 1;
1046 }
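/*
 * Seeking instead of writing is what lets the dumper below leave holes for
 * pages it does not copy (the zero page, or pages get_user_pages() cannot
 * bring in), so the resulting core file can be sparse where the filesystem
 * supports it.
 */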
1047 
1048 /*
1049  * Decide whether a segment is worth dumping; default is yes to be
1050  * sure (missing info is worse than too much; etc).
1051  * Personally I'd include everything, and use the coredump limit...
1052  *
1053  * I think we should skip something. But I am not sure how. H.J.
1054  */
static inline int maydump(struct vm_area_struct *vma)
1056 {
1057 	/*
1058 	 * If we may not read the contents, don't allow us to dump
1059 	 * them either. "dump_write()" can't handle it anyway.
1060 	 */
1061 	if (!(vma->vm_flags & VM_READ))
1062 		return 0;
1063 
1064 	/* Do not dump I/O mapped devices! -DaveM */
1065 	if (vma->vm_flags & VM_IO)
1066 		return 0;
1067 #if 1
1068 	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1069 		return 1;
1070 	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1071 		return 0;
1072 #endif
1073 	return 1;
1074 }
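/*
 * In short: unreadable and I/O-mapped regions are never dumped, writable
 * and growable (stack/heap style) mappings always are, and plain read-only
 * or executable file mappings - typically the text of the binary and its
 * shared libraries - are skipped, presumably because they can be recovered
 * from the files on disk.
 */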
1075 
1076 #define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
1077 
1078 /* An ELF note in memory */
1079 struct memelfnote
1080 {
1081 	const char *name;
1082 	int type;
1083 	unsigned int datasz;
1084 	void *data;
1085 };
1086 
static int notesize(struct memelfnote *en)
1088 {
1089 	int sz;
1090 
1091 	sz = sizeof(struct elf_note);
1092 	sz += roundup(strlen(en->name), 4);
1093 	sz += roundup(en->datasz, 4);
1094 
1095 	return sz;
1096 }
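/*
 * Worked example: for the NT_PRSTATUS note the name is "CORE", so the
 * size is sizeof(struct elf_note) (normally three 32-bit words, 12 bytes)
 * + roundup(4, 4) + roundup(sizeof(struct elf_prstatus), 4).
 */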
1097 
1098 /* #define DEBUG */
1099 
1100 #ifdef DEBUG
static void dump_regs(const char *str, elf_greg_t *r)
1102 {
1103 	int i;
1104 	static const char *regs[] = { "ebx", "ecx", "edx", "esi", "edi", "ebp",
1105 					      "eax", "ds", "es", "fs", "gs",
1106 					      "orig_eax", "eip", "cs",
1107 					      "efl", "uesp", "ss"};
1108 	printk("Registers: %s\n", str);
1109 
1110 	for(i = 0; i < ELF_NGREG; i++)
1111 	{
1112 		unsigned long val = r[i];
1113 		printk("   %-2d %-5s=%08lx %lu\n", i, regs[i], val, val);
1114 	}
1115 }
1116 #endif
1117 
1118 #define DUMP_WRITE(addr, nr)	\
1119 	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1120 #define DUMP_SEEK(off)	\
1121 	do { if (!dump_seek(file, (off))) return 0; } while(0)
1122 
static int writenote(struct memelfnote *men, struct file *file)
1124 {
1125 	struct elf_note en;
1126 
1127 	en.n_namesz = strlen(men->name);
1128 	en.n_descsz = men->datasz;
1129 	en.n_type = men->type;
1130 
1131 	DUMP_WRITE(&en, sizeof(en));
1132 	DUMP_WRITE(men->name, en.n_namesz);
1133 	/* XXX - cast from long long to long to avoid need for libgcc.a */
1134 	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
1135 	DUMP_WRITE(men->data, men->datasz);
1136 	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
1137 
1138 	return 1;
1139 }
1140 #undef DUMP_WRITE
1141 #undef DUMP_SEEK
1142 
1143 #define DUMP_WRITE(addr, nr)	\
1144 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1145 		goto end_coredump;
1146 #define DUMP_SEEK(off)	\
1147 	if (!dump_seek(file, (off))) \
1148 		goto end_coredump;
1149 /*
1150  * Actual dumper
1151  *
1152  * This is a two-pass process; first we find the offsets of the bits,
1153  * and then they are actually written out.  If we run out of core limit
1154  * we just truncate.
1155  */
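/*
 * Rough layout of the resulting core file:
 *
 *	ELF header
 *	program headers: one PT_NOTE plus one PT_LOAD per vma
 *	note data: NT_PRSTATUS, NT_PRPSINFO, NT_TASKSTRUCT [, NT_PRFPREG]
 *	padding up to an ELF_EXEC_PAGESIZE boundary
 *	memory contents of each dumpable vma, in map order
 */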
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1157 {
1158 	int has_dumped = 0;
1159 	mm_segment_t fs;
1160 	int segs;
1161 	size_t size = 0;
1162 	int i;
1163 	struct vm_area_struct *vma;
1164 	struct elfhdr elf;
1165 	off_t offset = 0, dataoff;
1166 	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1167 	int numnote = 4;
1168 	struct memelfnote notes[4];
1169 	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
1170 	elf_fpregset_t fpu;		/* NT_PRFPREG */
1171 	struct elf_prpsinfo psinfo;	/* NT_PRPSINFO */
1172 
1173 	/* first copy the parameters from user space */
1174 	memset(&psinfo, 0, sizeof(psinfo));
1175 	{
1176 		unsigned int i, len;
1177 
1178 		len = current->mm->arg_end - current->mm->arg_start;
1179 		if (len >= ELF_PRARGSZ)
1180 			len = ELF_PRARGSZ-1;
1181 		copy_from_user(&psinfo.pr_psargs,
1182 			      (const char *)current->mm->arg_start, len);
1183 		for(i = 0; i < len; i++)
1184 			if (psinfo.pr_psargs[i] == 0)
1185 				psinfo.pr_psargs[i] = ' ';
1186 		psinfo.pr_psargs[len] = 0;
1187 
1188 	}
1189 
1190 	memset(&prstatus, 0, sizeof(prstatus));
1191 	/*
1192 	 * This transfers the registers from regs into the standard
1193 	 * coredump arrangement, whatever that is.
1194 	 */
1195 #ifdef ELF_CORE_COPY_REGS
1196 	ELF_CORE_COPY_REGS(prstatus.pr_reg, regs)
1197 #else
1198 	if (sizeof(elf_gregset_t) != sizeof(struct pt_regs))
1199 	{
1200 		printk("sizeof(elf_gregset_t) (%ld) != sizeof(struct pt_regs) (%ld)\n",
1201 			(long)sizeof(elf_gregset_t), (long)sizeof(struct pt_regs));
1202 	}
1203 	else
1204 		*(struct pt_regs *)&prstatus.pr_reg = *regs;
1205 #endif
1206 
1207 	/* now stop all vm operations */
1208 	down_write(&current->mm->mmap_sem);
1209 	segs = current->mm->map_count;
1210 
1211 #ifdef DEBUG
1212 	printk("elf_core_dump: %d segs %lu limit\n", segs, limit);
1213 #endif
1214 
1215 	/* Set up header */
1216 	memcpy(elf.e_ident, ELFMAG, SELFMAG);
1217 	elf.e_ident[EI_CLASS] = ELF_CLASS;
1218 	elf.e_ident[EI_DATA] = ELF_DATA;
1219 	elf.e_ident[EI_VERSION] = EV_CURRENT;
1220 	memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1221 
1222 	elf.e_type = ET_CORE;
1223 	elf.e_machine = ELF_ARCH;
1224 	elf.e_version = EV_CURRENT;
1225 	elf.e_entry = 0;
1226 	elf.e_phoff = sizeof(elf);
1227 	elf.e_shoff = 0;
1228 	elf.e_flags = 0;
1229 	elf.e_ehsize = sizeof(elf);
1230 	elf.e_phentsize = sizeof(struct elf_phdr);
1231 	elf.e_phnum = segs+1;		/* Include notes */
1232 	elf.e_shentsize = 0;
1233 	elf.e_shnum = 0;
1234 	elf.e_shstrndx = 0;
1235 
1236 	fs = get_fs();
1237 	set_fs(KERNEL_DS);
1238 
1239 	has_dumped = 1;
1240 	current->flags |= PF_DUMPCORE;
1241 
1242 	DUMP_WRITE(&elf, sizeof(elf));
1243 	offset += sizeof(elf);				/* Elf header */
1244 	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
1245 
1246 	/*
1247 	 * Set up the notes in similar form to SVR4 core dumps made
1248 	 * with info from their /proc.
1249 	 */
1250 
1251 	notes[0].name = "CORE";
1252 	notes[0].type = NT_PRSTATUS;
1253 	notes[0].datasz = sizeof(prstatus);
1254 	notes[0].data = &prstatus;
1255 	prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
1256 	prstatus.pr_sigpend = current->pending.signal.sig[0];
1257 	prstatus.pr_sighold = current->blocked.sig[0];
1258 	psinfo.pr_pid = prstatus.pr_pid = current->pid;
1259 	psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid;
1260 	psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp;
1261 	psinfo.pr_sid = prstatus.pr_sid = current->session;
1262 	prstatus.pr_utime.tv_sec = CT_TO_SECS(current->times.tms_utime);
1263 	prstatus.pr_utime.tv_usec = CT_TO_USECS(current->times.tms_utime);
1264 	prstatus.pr_stime.tv_sec = CT_TO_SECS(current->times.tms_stime);
1265 	prstatus.pr_stime.tv_usec = CT_TO_USECS(current->times.tms_stime);
1266 	prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->times.tms_cutime);
1267 	prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->times.tms_cutime);
1268 	prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->times.tms_cstime);
1269 	prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->times.tms_cstime);
1270 
1271 #ifdef DEBUG
1272 	dump_regs("Passed in regs", (elf_greg_t *)regs);
1273 	dump_regs("prstatus regs", (elf_greg_t *)&prstatus.pr_reg);
1274 #endif
1275 
1276 	notes[1].name = "CORE";
1277 	notes[1].type = NT_PRPSINFO;
1278 	notes[1].datasz = sizeof(psinfo);
1279 	notes[1].data = &psinfo;
1280 	i = current->state ? ffz(~current->state) + 1 : 0;
1281 	psinfo.pr_state = i;
1282 	psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
1283 	psinfo.pr_zomb = psinfo.pr_sname == 'Z';
1284 	psinfo.pr_nice = current->nice;
1285 	psinfo.pr_flag = current->flags;
1286 	psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
1287 	psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
1288 	strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
1289 
1290 	notes[2].name = "CORE";
1291 	notes[2].type = NT_TASKSTRUCT;
1292 	notes[2].datasz = sizeof(*current);
1293 	notes[2].data = current;
1294 
1295 	/* Try to dump the FPU. */
1296 	prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
1297 	if (!prstatus.pr_fpvalid)
1298 	{
1299 		numnote--;
1300 	}
1301 	else
1302 	{
1303 		notes[3].name = "CORE";
1304 		notes[3].type = NT_PRFPREG;
1305 		notes[3].datasz = sizeof(fpu);
1306 		notes[3].data = &fpu;
1307 	}
1308 
1309 	/* Write notes phdr entry */
1310 	{
1311 		struct elf_phdr phdr;
1312 		int sz = 0;
1313 
1314 		for(i = 0; i < numnote; i++)
1315 			sz += notesize(&notes[i]);
1316 
1317 		phdr.p_type = PT_NOTE;
1318 		phdr.p_offset = offset;
1319 		phdr.p_vaddr = 0;
1320 		phdr.p_paddr = 0;
1321 		phdr.p_filesz = sz;
1322 		phdr.p_memsz = 0;
1323 		phdr.p_flags = 0;
1324 		phdr.p_align = 0;
1325 
1326 		offset += phdr.p_filesz;
1327 		DUMP_WRITE(&phdr, sizeof(phdr));
1328 	}
1329 
1330 	/* Page-align dumped data */
1331 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1332 
1333 	/* Write program headers for segments dump */
1334 	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1335 		struct elf_phdr phdr;
1336 		size_t sz;
1337 
1338 		sz = vma->vm_end - vma->vm_start;
1339 
1340 		phdr.p_type = PT_LOAD;
1341 		phdr.p_offset = offset;
1342 		phdr.p_vaddr = vma->vm_start;
1343 		phdr.p_paddr = 0;
1344 		phdr.p_filesz = maydump(vma) ? sz : 0;
1345 		phdr.p_memsz = sz;
1346 		offset += phdr.p_filesz;
1347 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1348 		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1349 		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1350 		phdr.p_align = ELF_EXEC_PAGESIZE;
1351 
1352 		DUMP_WRITE(&phdr, sizeof(phdr));
1353 	}
1354 
1355 	for(i = 0; i < numnote; i++)
1356 		if (!writenote(&notes[i], file))
1357 			goto end_coredump;
1358 
1359 	DUMP_SEEK(dataoff);
1360 
1361 	for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1362 		unsigned long addr;
1363 
1364 		if (!maydump(vma))
1365 			continue;
1366 
1367 #ifdef DEBUG
1368 		printk("elf_core_dump: writing %08lx-%08lx\n", vma->vm_start, vma->vm_end);
1369 #endif
1370 
1371 		for (addr = vma->vm_start;
1372 		     addr < vma->vm_end;
1373 		     addr += PAGE_SIZE) {
1374 			struct page* page;
1375 			struct vm_area_struct *vma;
1376 
1377 			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1378 						&page, &vma) <= 0) {
1379 				DUMP_SEEK (file->f_pos + PAGE_SIZE);
1380 			} else {
1381 				if (page == ZERO_PAGE(addr)) {
1382 					DUMP_SEEK (file->f_pos + PAGE_SIZE);
1383 				} else {
1384 					void *kaddr;
1385 					flush_cache_page(vma, addr);
1386 					kaddr = kmap(page);
1387 					DUMP_WRITE(kaddr, PAGE_SIZE);
1388 					flush_page_to_ram(page);
1389 					kunmap(page);
1390 				}
1391 				put_page(page);
1392 			}
1393 		}
1394 	}
1395 
1396 	if ((off_t) file->f_pos != offset) {
1397 		/* Sanity check */
1398 		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1399 		       (off_t) file->f_pos, offset);
1400 	}
1401 
1402  end_coredump:
1403 	set_fs(fs);
1404 	up_write(&current->mm->mmap_sem);
1405 	return has_dumped;
1406 }
1407 #endif		/* USE_ELF_CORE_DUMP */
1408 
static int __init init_elf_binfmt(void)
1410 {
1411 	return register_binfmt(&elf_format);
1412 }
1413 
static void __exit exit_elf_binfmt(void)
1415 {
	/* Remove the ELF loader. */
1417 	unregister_binfmt(&elf_format);
1418 }
1419 
1420 module_init(init_elf_binfmt)
1421 module_exit(exit_elf_binfmt)
1422 MODULE_LICENSE("GPL");
1423