/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#define __NO_VERSION__
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

int core_uses_pid;
char core_pattern[65] = "core";
int core_setuid_ok = 0;
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}
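/*
 * Illustration (not part of the original source): a binary-format module
 * would typically register itself at init time and unregister on unload.
 * A hypothetical sketch, using only the linux_binfmt fields referenced in
 * this file (example_load_binary is an assumed handler, not a real one):
 *
 *	static struct linux_binfmt example_format;
 *
 *	static int __init example_init(void)
 *	{
 *		example_format.module = THIS_MODULE;
 *		example_format.load_binary = example_load_binary;
 *		return register_binfmt(&example_format);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 */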

int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	if (fmt->module)
		__MOD_DEC_USE_COUNT(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable
 * for security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = user_path_walk(library, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op && file->f_op->read) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_inc_mod_count(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}

/*
 * count() counts the number of argument/environment pointers.
 */
static int count(char ** argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc, char ** argv, struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
		    !(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
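/*
 * Illustration (not in the original source): copy_strings() packs the
 * strings downward from bprm->p, which starts just below the top of the
 * MAX_ARG_PAGES argument area.  do_execve() copies the filename first,
 * then envp, then argv, so after execve("/bin/ls", {"ls", "-l"}, envp)
 * the area ends up roughly like:
 *
 *	bprm->p -> "ls\0" "-l\0" | env strings... | "/bin/ls\0" | pad
 *	           (argv strings)  (envp strings)   (filename)
 *	low addresses --------------------> PAGE_SIZE*MAX_ARG_PAGES
 *
 * Only string data lives here; the argv/envp pointer arrays are built
 * later by the binfmt handler (e.g. create_elf_tables for ELF).
 */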

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);
	return r;
}

/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * tsk->mmap_sem is held for writing.
 */
void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;
	struct vm_area_struct *vma;
	pgprot_t prot = PAGE_COPY;

	if (page_count(page) != 1)
		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
	pgd = pgd_offset(tsk->mm, address);

	spin_lock(&tsk->mm->page_table_lock);
	pmd = pmd_alloc(tsk->mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc(tsk->mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte))
		goto out;
	lru_cache_add(page);
	flush_dcache_page(page);
	flush_page_to_ram(page);
	/* lookup is cheap because there is only a single entry in the list */
	vma = find_vma(tsk->mm, address);
	if (vma)
		prot = vma->vm_page_prot;
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
	tsk->mm->rss++;
	spin_unlock(&tsk->mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&tsk->mm->page_table_lock);
	__free_page(page);
	force_sig(SIGKILL, tsk);
	return;
}

int setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i, ret;

	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = STACK_TOP;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
		mpnt->vm_ops = NULL;
		mpnt->vm_pgoff = 0;
		mpnt->vm_file = NULL;
		mpnt->vm_private_data = (void *) 0;
		if ((ret = insert_vm_struct(current->mm, mpnt))) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			put_dirty_page(current, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);

	return 0;
}
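/*
 * Illustration (not in the original source): setup_arg_pages() shifts
 * every offset in the binprm by stack_base, so the argument pages filled
 * by copy_strings() land directly below STACK_TOP in the new address
 * space:
 *
 *	STACK_TOP ----------------------------+
 *	  filename / envp / argv strings      |  MAX_ARG_PAGES pages,
 *	  bprm->p -> first byte of argv[0]    |  mapped via put_dirty_page()
 *	STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE --+
 *
 * The stack VMA created here runs from the page containing bprm->p up to
 * STACK_TOP, and grows downward later as the process pushes frames.
 */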

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct inode *inode;
	struct file *file;
	int err = 0;

	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd);
	file = ERR_PTR(err);
	if (!err) {
		inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}

int kernel_read(struct file *file, unsigned long offset,
	char * addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result = -ENOSYS;

	if (!file->f_op->read)
		goto fail;
	old_fs = get_fs();
	set_fs(get_ds());
	result = file->f_op->read(file, addr, count, &pos);
	set_fs(old_fs);
fail:
	return result;
}

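/*
 * Give the caller a private address space.  If the old mm has no other
 * users, its mappings are simply torn down and the mm is reused.
 * Otherwise a fresh mm_struct is allocated, put on the global mm list,
 * and installed as both ->mm and ->active_mm; the reference to the old
 * mm (or the borrowed active_mm of a kernel thread) is then dropped.
 */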
static int exec_mmap(void)
{
	struct mm_struct * mm, * old_mm;

	old_mm = current->mm;

	if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
		mm_release();
		down_write(&old_mm->mmap_sem);
		exit_mmap(old_mm);
		up_write(&old_mm->mmap_sem);
		return 0;
	}

	mm = mm_alloc();
	if (mm) {
		struct mm_struct *active_mm;

		if (init_new_context(current, mm)) {
			mmdrop(mm);
			return -ENOMEM;
		}

		/* Add it to the list of mm's */
		spin_lock(&mmlist_lock);
		list_add(&mm->mmlist, &init_mm.mmlist);
		mmlist_nr++;
		spin_unlock(&mmlist_lock);

		task_lock(current);
		active_mm = current->active_mm;
		current->mm = mm;
		current->active_mm = mm;
		task_unlock(current);
		activate_mm(active_mm, mm);
		mm_release();
		if (old_mm) {
			if (active_mm != old_mm) BUG();
			mmput(old_mm);
			return 0;
		}
		mmdrop(active_mm);
		return 0;
	}
	return -ENOMEM;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGNAL option to clone().)
 */

static inline int make_private_signals(void)
{
	struct signal_struct * newsig;

	if (atomic_read(&current->sig->count) <= 1)
		return 0;
	newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
	if (newsig == NULL)
		return -ENOMEM;
	spin_lock_init(&newsig->siglock);
	atomic_set(&newsig->count, 1);
	memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
	spin_lock_irq(&current->sigmask_lock);
	current->sig = newsig;
	spin_unlock_irq(&current->sigmask_lock);
	return 0;
}

/*
 * If make_private_signals() made a copy of the signal table, decrement the
 * refcount of the original table, and free it if necessary.
 * We don't do that in make_private_signals() so that we can back off
 * in flush_old_exec() if an error occurs after calling make_private_signals().
 */

static inline void release_old_signals(struct signal_struct * oldsig)
{
	if (current->sig == oldsig)
		return;
	if (atomic_dec_and_test(&oldsig->count))
		kmem_cache_free(sigact_cachep, oldsig);
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	write_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		write_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		write_lock(&files->file_lock);
	}
	write_unlock(&files->file_lock);
}
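/*
 * Illustration (not in the original source): a descriptor is closed here
 * only if user space marked it close-on-exec beforehand, e.g.:
 *
 *	int fd = open("/etc/secret", O_RDONLY);
 *	fcntl(fd, F_SETFD, FD_CLOEXEC);		(sets the close_on_exec bit)
 *	execve("/bin/child", argv, envp);	(fd closed by flush_old_files)
 *
 * Descriptors without the flag survive the exec unchanged.
 */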

/*
 * An execve() will automatically "de-thread" the process.
 * Note: we don't have to hold the tasklist_lock to test
 * whether we might need to do this. If we're not part of
 * a thread group, there is no way we can become one
 * dynamically. And if we are, we only need to protect the
 * unlink - even if we race with the last other thread exit,
 * at worst the list_del_init() might end up being a no-op.
 */
static inline void de_thread(struct task_struct *tsk)
{
	if (!list_empty(&tsk->thread_group)) {
		write_lock_irq(&tasklist_lock);
		list_del_init(&tsk->thread_group);
		write_unlock_irq(&tasklist_lock);
	}

	/* Minor oddity: this might stay the same. */
	tsk->tgid = tsk->pid;
}

void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	memcpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strncpy(tsk->comm, buf, sizeof(tsk->comm));
	tsk->comm[sizeof(tsk->comm)-1] = '\0';
	task_unlock(tsk);
}

int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	unsigned new_mm_dumpable;
	struct signal_struct * oldsig;
	struct files_struct * files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table
	 */
	oldsig = current->sig;
	retval = make_private_signals();
	if (retval)
		goto flush_failed;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */

	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto flush_failed;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap();
	if (retval)
		goto mmap_failed;

	/* This is the point of no return */
	steal_locks(files);
	put_files_struct(files);
	release_old_signals(oldsig);

	current->sas_ss_sp = current->sas_ss_size = 0;

	new_mm_dumpable = 0;	/* no change */
	if (current->euid == current->uid && current->egid == current->gid) {
		new_mm_dumpable = 1;
		current->task_dumpable = 1;
	}

	if (mmap_min_addr)
		current->personality &= ~(unsigned long)MMAP_PAGE_ZERO;

	name = bprm->filename;
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else if (i < (sizeof(tcomm) - 1))
			tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	flush_thread();

	de_thread(current);

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
		current->mm->dumpable = 0;
		current->pdeath_signal = 0;
	} else if (permission(bprm->file->f_dentry->d_inode, MAY_READ)) {
		current->mm->dumpable = 0;
	} else if (new_mm_dumpable)
		current->mm->dumpable = 1;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
flush_failed:
	spin_lock_irq(&current->sigmask_lock);
	if (current->sig != oldsig) {
		kmem_cache_free(sigact_cachep, current->sig);
		current->sig = oldsig;
	}
	spin_unlock_irq(&current->sigmask_lock);
	return retval;
}

/*
 * We mustn't allow tracing of suid binaries, unless
 * the tracer has the capability to trace anything..
 */
static inline int must_not_trace_exec(struct task_struct * p)
{
	return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP);
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_dentry->d_inode;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * vfs_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID)
			bprm->e_uid = inode->i_uid;

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
			bprm->e_gid = inode->i_gid;
	}

	/* We don't have VFS support for capabilities yet */
	cap_clear(bprm->cap_inheritable);
	cap_clear(bprm->cap_permitted);
	cap_clear(bprm->cap_effective);

	/* To support inheritance of root-permissions and suid-root
	 * executables under compatibility mode, we raise all three
	 * capability sets for the file.
	 *
	 * If only the real uid is 0, we only raise the inheritable
	 * and permitted sets of the executable file.
	 */

	if (!issecure(SECURE_NOROOT)) {
		if (bprm->e_uid == 0 || current->uid == 0) {
			cap_set_full(bprm->cap_inheritable);
			cap_set_full(bprm->cap_permitted);
		}
		if (bprm->e_uid == 0)
			cap_set_full(bprm->cap_effective);
	}

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

/*
 * This function is used to produce the new IDs and capabilities
 * from the old ones and the file's capabilities.
 *
 * The formula used for evolving capabilities is:
 *
 *	pI' = pI
 * (***)	pP' = (fP & X) | (fI & pI)
 *	pE' = pP' & fE		[NB. fE is 0 or ~0]
 *
 * I=Inheritable, P=Permitted, E=Effective	// p=process, f=file
 * ' indicates post-exec(), and X is the global 'cap_bset'.
 *
 */

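/*
 * Worked example (not in the original source): for a setuid-root binary
 * run in compatibility mode, prepare_binprm() has set fI = fP = fE = ~0,
 * so the formula above reduces to
 *
 *	pP' = X | pI    and    pE' = pP'
 *
 * i.e. the process gets every capability allowed by cap_bset plus
 * whatever it could already inherit.  For a plain binary (fI = fP =
 * fE = 0) it reduces to pP' = pE' = 0: no capabilities survive the exec.
 */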
void compute_creds(struct linux_binprm *bprm)
{
	kernel_cap_t new_permitted, working;
	int do_unlock = 0;

	new_permitted = cap_intersect(bprm->cap_permitted, cap_bset);
	working = cap_intersect(bprm->cap_inheritable,
				current->cap_inheritable);
	new_permitted = cap_combine(new_permitted, working);

	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
	    !cap_issubset(new_permitted, current->cap_permitted)) {
		current->mm->dumpable = 0;
		current->pdeath_signal = 0;

		lock_kernel();
		if (must_not_trace_exec(current)
		    || atomic_read(&current->fs->count) > 1
		    || atomic_read(&current->files->count) > 1
		    || atomic_read(&current->sig->count) > 1) {
			if (!capable(CAP_SETUID)) {
				bprm->e_uid = current->uid;
				bprm->e_gid = current->gid;
			}
			if (!capable(CAP_SETPCAP)) {
				new_permitted = cap_intersect(new_permitted,
							current->cap_permitted);
			}
		}
		do_unlock = 1;
	}

	/* For init, we want to retain the capabilities set
	 * in the init_task struct. Thus we skip the usual
	 * capability rules */
	if (current->pid != 1) {
		current->cap_permitted = new_permitted;
		current->cap_effective =
			cap_intersect(new_permitted, bprm->cap_effective);
	}

	/* AUD: Audit candidate if current->cap_effective is set */

	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;

	if (do_unlock)
		unlock_kernel();
	current->keep_capabilities = 0;
}

void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap(page);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap(page);
		}
		kunmap(page);
		bprm->argc--;
	}
}
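/*
 * Note (not in the original source): remove_arg_zero() walks past the
 * NUL-terminated argv[0] string at bprm->p, advancing bprm->p and
 * dropping argc by one.  Interpreter loaders such as binfmt_script use
 * it to discard the original argv[0] before pushing the interpreter
 * name and the script path in its place.
 */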

/*
 * cycle the list of binary formats handler, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	/* kernel module loader fixup - so we don't try to run modprobe
	   in kernel space. */
	set_fs(USER_DS);
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_inc_mod_count(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			char modname[20];
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			sprintf(modname, "binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
			request_module(modname);
#endif
		}
	}
	return retval;
}
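/*
 * Example (not in the original source): if no registered handler
 * recognizes the image and its first bytes are not printable text, the
 * CONFIG_KMOD path above asks kmod for a module named after bytes 2-3
 * of the header.  An ELF image ("\177ELF...") would thus trigger
 * request_module("binfmt-464c") on a little-endian machine, since
 * 'L' = 0x4c and 'F' = 0x46; a matching module alias can then pull in
 * the ELF loader before the second pass of the outer loop.
 */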

/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
{
	struct linux_binprm bprm;
	struct file *file;
	int retval;
	int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));

	bprm.file = file;
	bprm.filename = filename;
	bprm.sh_bang = 0;
	bprm.loader = 0;
	bprm.exec = 0;
	if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		return bprm.argc;
	}

	if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		return bprm.envc;
	}

	retval = prepare_binprm(&bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
	if (retval < 0)
		goto out;

	bprm.exec = bprm.p;
	retval = copy_strings(bprm.envc, envp, &bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm.argc, argv, &bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(&bprm, regs);
	if (retval >= 0)
		/* execve success */
		return retval;

out:
	/* Something went wrong, return the inode and free the argument pages */
	allow_write_access(bprm.file);
	if (bprm.file)
		fput(bprm.file);

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm.page[i];
		if (page)
			__free_page(page);
	}

	return retval;
}
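/*
 * Note (not in the original source): do_execve() is the arch-independent
 * core; each architecture provides the actual sys_execve() entry point,
 * which fetches the filename from user space and passes its register
 * frame down.  A sketch of the i386 flavour (details vary by port):
 *
 *	asmlinkage int sys_execve(struct pt_regs regs)
 *	{
 *		char *filename = getname((char *) regs.ebx);
 *		int error = PTR_ERR(filename);
 *		if (!IS_ERR(filename)) {
 *			error = do_execve(filename, (char **) regs.ecx,
 *					  (char **) regs.edx, &regs);
 *			putname(filename);
 *		}
 *		return error;
 *	}
 */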

void set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;
	if (new && new->module)
		__MOD_INC_USE_COUNT(new->module);
	current->binfmt = new;
	if (old && old->module)
		__MOD_DEC_USE_COUNT(old->module);
}

#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->pid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->pid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
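/*
 * Example (not in the original source): with core_pattern set to
 * "/var/cores/%e.%p" via sysctl, a crash of pid 1234 running "httpd"
 * expands to "/var/cores/httpd.1234".  With the default pattern "core"
 * and core_uses_pid enabled, the output is "core.1234" courtesy of the
 * backward-compatibility branch above.
 */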

int do_coredump(long signr, struct pt_regs * regs)
{
	struct linux_binfmt * binfmt;
	char corename[CORENAME_MAX_SIZE + 1];
	struct file * file;
	struct inode * inode;
	int retval = 0;
	int fsuid = current->fsuid;

	lock_kernel();
	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!is_dumpable(current)) {
		if (!core_setuid_ok || !current->task_dumpable)
			goto fail;
		current->fsuid = 0;
	}
	current->mm->dumpable = 0;
	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail;

	format_corename(corename, core_pattern, signr);
	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
	if (IS_ERR(file))
		goto fail;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	/*
	 * Don't allow local users to get cute and trick others into
	 * coredumping into their pre-created files:
	 */
	if (inode->i_uid != current->fsuid)
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

close_fail:
	filp_close(file, NULL);
fail:
	if (fsuid != current->fsuid)
		current->fsuid = fsuid;
	unlock_kernel();
	return retval;
}