/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

/*
 * SLAB caches for signal bits.
 */

#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SIG_SLAB_DEBUG, NULL, NULL);
	if (!sigqueue_cachep)
		panic("signals_init(): cannot create sigqueue SLAB cache");
}


/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct task_struct *tsk, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = tsk->pending.signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
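
/*
 * Worked example (illustrative, not from the original source): with
 * _NSIG_WORDS == 2, a pending word s[0] == 0x102 (signals 2 and 9) and
 * a blocked word m[0] == 0x2 leave x == 0x100; ffz(~x) == 8, so
 * next_signal() returns 9 (SIGKILL).  The "+ 1" converts a zero-based
 * bit index into a one-based signal number.
 */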

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q, *n;

	sigemptyset(&queue->signal);
	q = queue->head;
	queue->head = NULL;
	queue->tail = &queue->head;

	while (q) {
		n = q->next;
		kmem_cache_free(sigqueue_cachep, q);
		atomic_dec(&nr_queued_signals);
		q = n;
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	t->sigpending = 0;
	flush_sigqueue(&t->pending);
}

void exit_sighand(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->sig;

	spin_lock_irq(&tsk->sigmask_lock);
	if (sig) {
		tsk->sig = NULL;
		if (atomic_dec_and_test(&sig->count))
			kmem_cache_free(sigact_cachep, sig);
	}
	tsk->sigpending = 0;
	flush_sigqueue(&tsk->pending);
	spin_unlock_irq(&tsk->sigmask_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
	int i;
	struct k_sigaction *ka = &t->sig->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/*
 * sig_exit - cause the current task to exit due to a signal.
 */

void
sig_exit(int sig, int exit_code, struct siginfo *info)
{
	struct task_struct *t;

	sigaddset(&current->pending.signal, sig);
	recalc_sigpending(current);
	current->flags |= PF_SIGNALED;

	/* Propagate the signal to all the tasks in
	 * our thread group.
	 */
	if (info && (unsigned long)info != 1
	    && info->si_code != SI_TKILL) {
		read_lock(&tasklist_lock);
		for_each_thread(t) {
			force_sig_info(sig, info, t);
		}
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
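
/*
 * Illustrative sketch of a caller -- the names below are hypothetical,
 * not kernel API.  A driver that must not see signals while it holds a
 * hardware lock could register a notifier that returns 0 (block) until
 * the lock is released:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->lock_held ? 0 : 1;
 *	}
 *
 *	block_all_signals(my_notifier, dev, &all_signals_mask);
 *	... critical section ...
 *	unblock_all_signals();
 *
 * The DRM layer uses this interface around its hardware lock.
 */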

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	if (sigismember(&list->signal, sig)) {
		/* Collect the siginfo appropriate to this signal.  */
		struct sigqueue *q, **pp;
		pp = &list->head;
		while ((q = *pp) != NULL) {
			if (q->info.si_signo == sig)
				goto found_it;
			pp = &q->next;
		}

		/* Ok, it wasn't in the queue.  We must have
		   been out of queue space.  So zero out the
		   info.  */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
		return 1;

found_it:
		if ((*pp = q->next) == NULL)
			list->tail = pp;

		/* Copy the sigqueue information and free the queue entry */
		copy_siginfo(info, &q->info);
		kmem_cache_free(sigqueue_cachep, q);
		atomic_dec(&nr_queued_signals);

		/* RT signals can be queued multiple times; if another
		   instance of this signal is still queued, keep the
		   pending bit set.  */
		if (sig >= SIGRTMIN) {
			while ((q = *pp) != NULL) {
				if (q->info.si_signo == sig)
					goto found_another;
				pp = &q->next;
			}
		}

		sigdelset(&list->signal, sig);
found_another:
		return 1;
	}
	return 0;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	int sig = 0;

#if DEBUG_SIG
	printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
		signal_pending(current));
#endif

	sig = next_signal(current, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					current->sigpending = 0;
					return 0;
				}
			}
		}

		if (!collect_signal(sig, &current->pending, info))
			sig = 0;

		/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
		   we need to xchg out the timer overrun values.  */
	}
	recalc_sigpending(current);

#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}
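
/*
 * Sketch of a typical call site (the per-arch do_signal() loops look
 * roughly like this); note the locking requirement stated above:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sigmask_lock);
 *	signr = dequeue_signal(&current->blocked, &info);
 *	spin_unlock_irq(&current->sigmask_lock);
 */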

static int rm_from_queue(int sig, struct sigpending *s)
{
	struct sigqueue *q, **pp;

	if (!sigismember(&s->signal, sig))
		return 0;

	sigdelset(&s->signal, sig);

	pp = &s->head;

	while ((q = *pp) != NULL) {
		if (q->info.si_signo == sig) {
			if ((*pp = q->next) == NULL)
				s->tail = pp;
			kmem_cache_free(sigqueue_cachep, q);
			atomic_dec(&nr_queued_signals);
			continue;
		}
		pp = &q->next;
	}
	return 1;
}

/*
 * Remove signal sig from t->pending.
 * Returns 1 if sig was found.
 *
 * All callers must be holding t->sigmask_lock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
	return rm_from_queue(sig, &t->pending);
}

/*
 * Bad permissions for sending the signal
 */
int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) || (current->session != t->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL);
}

/*
 * Signal type:
 *    < 0 : global action (kill - spread to all non-blocked threads)
 *    = 0 : ignored
 *    > 0 : wake up.
 */
static int signal_type(int sig, struct signal_struct *signals)
{
	unsigned long handler;

	if (!signals)
		return 0;

	handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
	if (handler > 1)
		return 1;

	/* "Ignore" handler.. Illogical, but SIG_IGN on SIGCHLD implies
	   an action (automatic child reaping), so it still needs a wakeup */
	if (handler == 1)
		return sig == SIGCHLD;

	/* Default handler. Normally lethal, but.. */
	switch (sig) {

	/* Ignored */
	case SIGCONT: case SIGWINCH:
	case SIGCHLD: case SIGURG:
		return 0;

	/* Implicit behaviour */
	case SIGTSTP: case SIGTTIN: case SIGTTOU:
		return 1;

	/* Implicit actions (kill or do special stuff) */
	default:
		return -1;
	}
}


/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
	/* Don't ignore traced or blocked signals */
	if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
		return 0;

	return signal_type(sig, t->sig) == 0;
}

/*
 * Handle TASK_STOPPED cases etc implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
	switch (sig) {
	case SIGCONT:
		/* SIGCONT must not wake a task while it's being traced */
		if ((t->state == TASK_STOPPED) &&
		    ((t->ptrace & (PT_PTRACED|PT_TRACESYS)) ==
		     (PT_PTRACED|PT_TRACESYS)))
			return;
		/* fall through */
	case SIGKILL:
		/* Wake up the process if stopped.
		 * Note that if the process is being traced, waking it up
		 * will make it continue before being killed. This may end
		 * up unexpectedly completing whatever syscall is pending.
		 */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		rm_sig_from_queue(SIGSTOP, t);
		rm_sig_from_queue(SIGTSTP, t);
		rm_sig_from_queue(SIGTTOU, t);
		rm_sig_from_queue(SIGTTIN, t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		rm_sig_from_queue(SIGCONT, t);
		break;
	}
}

static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals) {
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	}

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->next = NULL;
		*signals->tail = q;
		signals->tail = &q->next;
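		/* Callers may pass 0 or 1 in place of a real siginfo
		   pointer (see send_sig() and force_sig() below), so
		   synthesize the corresponding siginfo contents here. */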
		switch ((unsigned long) info) {
			case 0:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_USER;
				q->info.si_pid = current->pid;
				q->info.si_uid = current->uid;
				break;
			case 1:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_KERNEL;
				q->info.si_pid = 0;
				q->info.si_uid = 0;
				break;
			default:
				copy_siginfo(&q->info, info);
				break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow: abort.  We abort only if the signal is
		 * real-time and was sent by user space via something other
		 * than kill(), since such senders may legitimately see EAGAIN.
		 */
		return -EAGAIN;
	}

	sigaddset(&signals->signal, sig);
	return 0;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "sigmask_lock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
static inline void signal_wake_up(struct task_struct *t)
{
	t->sigpending = 1;

#ifdef CONFIG_SMP
	/*
	 * If the task is running on a different CPU
	 * force a reschedule on the other CPU to make
	 * it notice the new signal quickly.
	 *
	 * The code below is a tad loose and might occasionally
	 * kick the wrong CPU if we catch the process in the
	 * process of changing - but no harm is done by that
	 * other than doing an extra (lightweight) IPI interrupt.
	 */
	spin_lock(&runqueue_lock);
	if (task_has_cpu(t) && t->processor != smp_processor_id())
		smp_send_reschedule(t->processor);
	spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */

	if (t->state & TASK_INTERRUPTIBLE) {
		wake_up_process(t);
		return;
	}
}

static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	int retval = send_signal(sig, info, &t->pending);

	if (!retval && !sigismember(&t->blocked, sig))
		signal_wake_up(t);

	return retval;
}

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;

#if DEBUG_SIG
	printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;
	/* The somewhat baroque permissions check... */
	ret = -EPERM;
	if (bad_signal(sig, info, t))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	handle_stop_signal(sig, t);

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default).  */

	if (ignored_signal(sig, t))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
		goto out;

	ret = deliver_signal(sig, info, t);
out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
out_nolock:
#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}
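
/*
 * Illustrative use (a sketch of what architecture code does): fault
 * handlers deliver signals that the task must not be able to block or
 * ignore, e.g.
 *
 *	force_sig(SIGSEGV, current);
 *
 * from a page-fault handler when no fixup is possible.
 */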

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pgrp == pgrp && thread_group_leader(p)) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}

inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		if (!thread_group_leader(p)) {
			struct task_struct *tg;
			tg = find_task_by_pid(p->tgid);
			if (tg)
				p = tg;
		}
		error = send_sig_info(sig, info, p);
	}
	read_unlock(&tasklist_lock);
	return error;
}


/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, current->pgrp);
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pid > 1 && p != current && thread_group_leader(p)) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
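/*
 * In these wrappers, "priv" selects the fake siginfo pointer handed to
 * the *_info() functions: non-zero becomes 1 (treated as a kernel-
 * generated signal, exempt from the checks in bad_signal()), and zero
 * becomes 0 (treated like an ordinary kill() from the current task).
 */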

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void wake_up_parent(struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	do {
		wake_up_interruptible(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
	} while (tsk != parent);
}

/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why, status;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->times.tms_utime;
	info.si_stime = tsk->times.tms_stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_parent(tsk->p_pptr);
}


/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	read_lock(&tasklist_lock);
	do_notify_parent(tsk, sig);
	read_unlock(&tasklist_lock);
}

EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&current->blocked, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&current->blocked, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			current->blocked = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
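
/*
 * User-space view (an illustrative sketch, not kernel code): glibc's
 * sigprocmask() typically lands here.  Blocking SIGINT looks like:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *
 * Note that SIGKILL and SIGSTOP are silently removed from the new mask.
 */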

long do_sigpending(void *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->pending.signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
		    const struct timespec *uts, size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sigmask_lock);
	sig = dequeue_signal(&these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock the signals
			 * we're interested in, so that we'll be awakened
			 * when one of them arrives while we sleep.  */
			sigset_t oldblocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending(current);
			spin_unlock_irq(&current->sigmask_lock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sigmask_lock);
			sig = dequeue_signal(&these, &info);
			current->blocked = oldblocked;
			recalc_sigpending(current);
		}
	}
	spin_unlock_irq(&current->sigmask_lock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
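
/*
 * User-space view (an illustrative sketch, not kernel code): this is
 * the backend for sigwaitinfo() and sigtimedwait().  Waiting up to one
 * second for SIGUSR1 looks like:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 1, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		... info.si_pid identifies the sender ...
 *
 * The signal should normally be blocked with sigprocmask() first, or
 * it may be delivered to a handler instead of being collected here.
 */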

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/*
 *  Kill only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = send_sig_info(sig, &info, p);
	}
	read_unlock(&tasklist_lock);
	return error;
}
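
/*
 * Note: tkill() lets thread libraries direct a signal at one specific
 * task rather than at a whole thread group; user space would typically
 * invoke it directly, along the lines of (sketch)
 * syscall(__NR_tkill, tid, sig).
 */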

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG ||
	    (act && (sig == SIGKILL || sig == SIGSTOP)))
		return -EINVAL;

	k = &current->sig->action[sig-1];

	spin_lock(&current->sig->siglock);

	if (oact)
		*oact = *k;

	if (act) {
		*k = *act;
		sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 *
		 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
		 * signal isn't actually ignored, but does automatic child
		 * reaping, while SIG_DFL is explicitly said by POSIX to force
		 * the signal to be ignored.
		 */

		if (k->sa.sa_handler == SIG_IGN
		    || (k->sa.sa_handler == SIG_DFL
			&& (sig == SIGCONT ||
			    sig == SIGCHLD ||
			    sig == SIGURG ||
			    sig == SIGWINCH))) {
			spin_lock_irq(&current->sigmask_lock);
			if (rm_sig_from_queue(sig, current))
				recalc_sigpending(current);
			spin_unlock_irq(&current->sigmask_lock);
		}
	}

	spin_unlock(&current->sig->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack (sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags == 0
		 * to mean ss_flags == SS_ONSTACK (as this was the only
		 * way that worked); this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
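
/*
 * User-space view (an illustrative sketch, not kernel code): install an
 * alternate stack so that, with an SA_ONSTACK handler, SIGSEGV can be
 * handled even after the normal stack overflows:
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 */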

asmlinkage long
sys_sigpending(old_sigset_t *set)
{
	return do_sigpending(set, sizeof(*set));
}

#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* !defined(__sparc__) */
#endif /* !defined(__alpha__) */

#if !defined(__alpha__) && !defined(__ia64__)
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sigmask_lock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */

#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) */