Lines matching refs:tsk — the lines in kernel/exit.c that reference the task_struct pointer tsk, each listed with its source line number and the enclosing function; the trailing argument/local tag marks the lines where tsk is declared as a parameter or a local variable.

143 static void __exit_signal(struct task_struct *tsk)  in __exit_signal()  argument
145 struct signal_struct *sig = tsk->signal; in __exit_signal()
146 bool group_dead = thread_group_leader(tsk); in __exit_signal()
151 sighand = rcu_dereference_check(tsk->sighand, in __exit_signal()
156 posix_cpu_timers_exit(tsk); in __exit_signal()
158 posix_cpu_timers_exit_group(tsk); in __exit_signal()
172 if (tsk == sig->curr_target) in __exit_signal()
173 sig->curr_target = next_thread(tsk); in __exit_signal()
176 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, in __exit_signal()
185 task_cputime(tsk, &utime, &stime); in __exit_signal()
189 sig->gtime += task_gtime(tsk); in __exit_signal()
190 sig->min_flt += tsk->min_flt; in __exit_signal()
191 sig->maj_flt += tsk->maj_flt; in __exit_signal()
192 sig->nvcsw += tsk->nvcsw; in __exit_signal()
193 sig->nivcsw += tsk->nivcsw; in __exit_signal()
194 sig->inblock += task_io_get_inblock(tsk); in __exit_signal()
195 sig->oublock += task_io_get_oublock(tsk); in __exit_signal()
196 task_io_accounting_add(&sig->ioac, &tsk->ioac); in __exit_signal()
197 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; in __exit_signal()
199 __unhash_process(tsk, group_dead); in __exit_signal()
206 flush_sigqueue(&tsk->pending); in __exit_signal()
207 tsk->sighand = NULL; in __exit_signal()
211 clear_tsk_thread_flag(tsk, TIF_SIGPENDING); in __exit_signal()
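Taken together, the __exit_signal() references above are mostly the accounting fold-up: before the dying thread is unhashed, its CPU time, fault counts, context switches and I/O statistics are added into the shared signal_struct so that wait() and getrusage() still see them afterwards. A condensed sketch of that stretch follows; it is a fragment that only makes sense inside the function (siglock/stats_lock locking and the posix-timer and tty teardown are elided), and the sig->utime/sig->stime lines, which do not mention tsk and so are absent from the listing, are restored here for readability:

        u64 utime, stime;

        task_cputime(tsk, &utime, &stime);
        sig->utime += utime;
        sig->stime += stime;
        sig->gtime += task_gtime(tsk);
        sig->min_flt += tsk->min_flt;
        sig->maj_flt += tsk->maj_flt;
        sig->nvcsw += tsk->nvcsw;               /* voluntary context switches */
        sig->nivcsw += tsk->nivcsw;             /* involuntary context switches */
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        sig->sum_sched_runtime += tsk->se.sum_exec_runtime;

        __unhash_process(tsk, group_dead);      /* drop pid links, detach from the thread group */
        flush_sigqueue(&tsk->pending);          /* free queued per-thread signals */
        tsk->sighand = NULL;                    /* sighand itself is cleaned up after siglock is dropped */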
220 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); in delayed_put_task_struct() local
222 kprobe_flush_task(tsk); in delayed_put_task_struct()
223 rethook_flush_task(tsk); in delayed_put_task_struct()
224 perf_event_delayed_put(tsk); in delayed_put_task_struct()
225 trace_sched_process_free(tsk); in delayed_put_task_struct()
226 put_task_struct(tsk); in delayed_put_task_struct()
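The delayed_put_task_struct() hits are essentially the whole function: it is the RCU callback that runs once no reader can still hold a stale task_struct pointer, flushing per-task kprobe/rethook state, letting perf drop its context, emitting the sched_process_free tracepoint and putting a final reference. A near-verbatim sketch (comments are editorial):

static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        kprobe_flush_task(tsk);         /* recycle this task's kretprobe instances */
        rethook_flush_task(tsk);        /* and its rethook shadow frames */
        perf_event_delayed_put(tsk);    /* perf contexts must be gone by now */
        trace_sched_process_free(tsk);
        put_task_struct(tsk);           /* may free the task_struct */
}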
374 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) in kill_orphaned_pgrp() argument
376 struct pid *pgrp = task_pgrp(tsk); in kill_orphaned_pgrp()
377 struct task_struct *ignored_task = tsk; in kill_orphaned_pgrp()
383 parent = tsk->real_parent; in kill_orphaned_pgrp()
391 task_session(parent) == task_session(tsk) && in kill_orphaned_pgrp()
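kill_orphaned_pgrp() is referenced where it decides whether tsk's process group has just become orphaned. The lines above show only the tsk accesses; the sketch below fills in the orphan test and the SIGHUP/SIGCONT delivery from the rest of the function, and the helper names will_become_orphaned_pgrp() and has_stopped_jobs() are quoted from memory rather than from this listing:

static void kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit path: compare against tsk's own parent */
                parent = tsk->real_parent;
        else
                /* reparent path: tsk stays around, so don't ignore it */
                ignored_task = NULL;

        /* POSIX orphaned-process-group rule: if the group is losing its last
         * link to another group in the same session and it contains stopped
         * jobs, send SIGHUP then SIGCONT to the whole group. */
        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}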
399 static void coredump_task_exit(struct task_struct *tsk) in coredump_task_exit() argument
410 spin_lock_irq(&tsk->sighand->siglock); in coredump_task_exit()
411 tsk->flags |= PF_POSTCOREDUMP; in coredump_task_exit()
412 core_state = tsk->signal->core_state; in coredump_task_exit()
413 spin_unlock_irq(&tsk->sighand->siglock); in coredump_task_exit()
417 ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) { in coredump_task_exit()
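coredump_task_exit() appears where the exiting thread synchronizes with a coredump that may be in progress: under siglock it marks itself PF_POSTCOREDUMP and samples signal->core_state; if a dump is running and the thread is not a plain user worker (user workers such as vhost threads stay out of coredumps), it has to report in and wait for the dump to finish. A sketch with the waiting body elided:

static void coredump_task_exit(struct task_struct *tsk)
{
        struct core_state *core_state;

        /* Serialize with a core-dumping sibling: the flag and the
         * core_state pointer are read and written under siglock. */
        spin_lock_irq(&tsk->sighand->siglock);
        tsk->flags |= PF_POSTCOREDUMP;
        core_state = tsk->signal->core_state;
        spin_unlock_irq(&tsk->sighand->siglock);

        if (core_state &&
            ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
                /* ... report in to the dumper and sleep until the
                 *     coredump has completed (elided) ... */
        }
}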
727 static void exit_notify(struct task_struct *tsk, int group_dead) in exit_notify() argument
734 forget_original_parent(tsk, &dead); in exit_notify()
737 kill_orphaned_pgrp(tsk->group_leader, NULL); in exit_notify()
739 tsk->exit_state = EXIT_ZOMBIE; in exit_notify()
740 if (unlikely(tsk->ptrace)) { in exit_notify()
741 int sig = thread_group_leader(tsk) && in exit_notify()
742 thread_group_empty(tsk) && in exit_notify()
743 !ptrace_reparented(tsk) ? in exit_notify()
744 tsk->exit_signal : SIGCHLD; in exit_notify()
745 autoreap = do_notify_parent(tsk, sig); in exit_notify()
746 } else if (thread_group_leader(tsk)) { in exit_notify()
747 autoreap = thread_group_empty(tsk) && in exit_notify()
748 do_notify_parent(tsk, tsk->exit_signal); in exit_notify()
754 tsk->exit_state = EXIT_DEAD; in exit_notify()
755 list_add(&tsk->ptrace_entry, &dead); in exit_notify()
759 if (unlikely(tsk->signal->notify_count < 0)) in exit_notify()
760 wake_up_process(tsk->signal->group_exec_task); in exit_notify()
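The exit_notify() references cover the decision of whether the exiting task becomes a reapable zombie or is released immediately: children are reparented first, an orphaned-pgrp check runs if the whole group died, and the parent is notified with either the task's exit_signal or SIGCHLD depending on ptrace and thread-group status. The sketch below condenses that; the tasklist_lock handling, the autoreap = true branch for non-leader threads, and the final release_task() pass over the dead list are reconstructed rather than quoted from the listing:

static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;
        struct task_struct *p, *n;
        LIST_HEAD(dead);

        write_lock_irq(&tasklist_lock);
        forget_original_parent(tsk, &dead);     /* reparent our children */

        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        tsk->exit_state = EXIT_ZOMBIE;
        if (unlikely(tsk->ptrace)) {
                /* a traced task always notifies its tracer/parent */
                int sig = thread_group_leader(tsk) &&
                          thread_group_empty(tsk) &&
                          !ptrace_reparented(tsk) ?
                          tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                /* the leader only notifies once the other threads are gone */
                autoreap = thread_group_empty(tsk) &&
                           do_notify_parent(tsk, tsk->exit_signal);
        } else {
                /* non-leader threads are never waited for */
                autoreap = true;
        }

        if (autoreap) {
                tsk->exit_state = EXIT_DEAD;
                list_add(&tsk->ptrace_entry, &dead);
        }

        /* de_thread() may be waiting for this (old) group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exec_task);
        write_unlock_irq(&tasklist_lock);

        list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }
}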
793 static void synchronize_group_exit(struct task_struct *tsk, long code) in synchronize_group_exit() argument
795 struct sighand_struct *sighand = tsk->sighand; in synchronize_group_exit()
796 struct signal_struct *signal = tsk->signal; in synchronize_group_exit()
811 struct task_struct *tsk = current; in do_exit() local
816 synchronize_group_exit(tsk, code); in do_exit()
818 WARN_ON(tsk->plug); in do_exit()
820 kcov_task_exit(tsk); in do_exit()
821 kmsan_task_exit(tsk); in do_exit()
823 coredump_task_exit(tsk); in do_exit()
825 user_events_exit(tsk); in do_exit()
828 exit_signals(tsk); /* sets PF_EXITING */ in do_exit()
831 if (tsk->mm) in do_exit()
832 sync_mm_rss(tsk->mm); in do_exit()
833 acct_update_integrals(tsk); in do_exit()
834 group_dead = atomic_dec_and_test(&tsk->signal->live); in do_exit()
840 if (unlikely(is_global_init(tsk))) in do_exit()
842 tsk->signal->group_exit_code ?: (int)code); in do_exit()
845 hrtimer_cancel(&tsk->signal->real_timer); in do_exit()
846 exit_itimers(tsk); in do_exit()
848 if (tsk->mm) in do_exit()
849 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); in do_exit()
854 audit_free(tsk); in do_exit()
856 tsk->exit_code = code; in do_exit()
857 taskstats_exit(tsk, group_dead); in do_exit()
863 trace_sched_process_exit(tsk); in do_exit()
865 exit_sem(tsk); in do_exit()
866 exit_shm(tsk); in do_exit()
867 exit_files(tsk); in do_exit()
868 exit_fs(tsk); in do_exit()
871 exit_task_namespaces(tsk); in do_exit()
872 exit_task_work(tsk); in do_exit()
873 exit_thread(tsk); in do_exit()
881 perf_event_exit_task(tsk); in do_exit()
883 sched_autogroup_exit_task(tsk); in do_exit()
884 cgroup_exit(tsk); in do_exit()
889 flush_ptrace_hw_breakpoint(tsk); in do_exit()
892 exit_notify(tsk, group_dead); in do_exit()
893 proc_exit_connector(tsk); in do_exit()
894 mpol_put_task_policy(tsk); in do_exit()
904 if (tsk->io_context) in do_exit()
905 exit_io_context(tsk); in do_exit()
907 if (tsk->splice_pipe) in do_exit()
908 free_pipe_info(tsk->splice_pipe); in do_exit()
910 if (tsk->task_frag.page) in do_exit()
911 put_page(tsk->task_frag.page); in do_exit()
913 exit_task_stack_account(tsk); in do_exit()
917 if (tsk->nr_dirtied) in do_exit()
918 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); in do_exit()
922 lockdep_free_task(tsk); in do_exit()
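Read top to bottom, the do_exit() references above are the teardown order for current. A heavily condensed outline follows; the sanity checks, kcov/kmsan/audit/accounting hooks, the mm release and several other steps that do appear in the listing are thinned out or elided, and the phase comments are editorial, not quoted:

        struct task_struct *tsk = current;
        int group_dead;

        synchronize_group_exit(tsk, code);      /* publish the group exit code */
        coredump_task_exit(tsk);                /* wait out a coredump in flight */
        exit_signals(tsk);                      /* sets PF_EXITING */

        if (tsk->mm)
                sync_mm_rss(tsk->mm);           /* fold per-thread RSS counters */
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                if (unlikely(is_global_init(tsk)))
                        panic("Attempted to kill init! exitcode=0x%08x\n",
                              tsk->signal->group_exit_code ?: (int)code);
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk);              /* stop interval timers group-wide */
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        /* release the big resources: SysV IPC, files, fs, namespaces, thread state */
        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        exit_thread(tsk);

        perf_event_exit_task(tsk);              /* flush counters before the parent is woken */
        cgroup_exit(tsk);

        exit_notify(tsk, group_dead);           /* reparent children, notify parent, maybe self-reap */

        /* leftovers that still hang off task_struct */
        if (tsk->io_context)
                exit_io_context(tsk);
        if (tsk->splice_pipe)
                free_pipe_info(tsk->splice_pipe);
        if (tsk->task_frag.page)
                put_page(tsk->task_frag.page);
        exit_task_stack_account(tsk);
        lockdep_free_task(tsk);
        /* ... do_task_dead() never returns ... */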
937 struct task_struct *tsk = current; in make_task_dead() local
942 if (unlikely(!tsk->pid)) in make_task_dead()
975 if (unlikely(tsk->flags & PF_EXITING)) { in make_task_dead()
977 futex_exit_recursive(tsk); in make_task_dead()
978 tsk->exit_state = EXIT_DEAD; in make_task_dead()
979 refcount_inc(&tsk->rcu_users); in make_task_dead()
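make_task_dead() is the "something went badly wrong" entry to the exit path, called from the die()/BUG machinery. The references above are its guard rails: the idle task (pid 0) must never be killed, and if the fault happened while the task was already in do_exit() (PF_EXITING set), it must not recurse into do_exit() again; instead the futex code is told its exit cleanup counts as done, the task is marked EXIT_DEAD and it parks itself. A sketch, with the interrupt-context and irqs-disabled checks elided and the diagnostic strings quoted from memory:

        struct task_struct *tsk = current;

        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        if (unlikely(tsk->flags & PF_EXITING)) {
                /* crashed while already exiting: do not re-enter do_exit() */
                pr_alert("Fixing recursive fault but reboot is needed!\n");
                futex_exit_recursive(tsk);      /* pretend futex cleanup finished */
                tsk->exit_state = EXIT_DEAD;
                refcount_inc(&tsk->rcu_users);
                do_task_dead();
        }

        do_exit(signr);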
1492 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) in do_wait_thread() argument
1496 list_for_each_entry(p, &tsk->children, sibling) { in do_wait_thread()
1506 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) in ptrace_do_wait() argument
1510 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { in ptrace_do_wait()
1611 struct task_struct *tsk = current; in do_wait() local
1614 retval = do_wait_thread(wo, tsk); in do_wait()
1618 retval = ptrace_do_wait(wo, tsk); in do_wait()
1624 } while_each_thread(current, tsk); in do_wait()
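The wait side mirrors the exit side: do_wait() walks every thread in the caller's thread group and, for each, scans two lists hanging off task_struct, the natural children (->children, linked by ->sibling) and the attached tracees (->ptraced, linked by ->ptrace_entry). A condensed sketch; the filtering inside wait_consider_task(), the tasklist_lock read lock and do_wait()'s retry/wakeup logic are elided or simplified:

static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);   /* 0: not a ptrace wait */
                if (ret)
                        return ret;
        }
        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);   /* 1: ptrace wait */
                if (ret)
                        return ret;
        }
        return 0;
}

        /* in do_wait(): every thread of the caller gets to contribute children */
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        break;
                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        break;
        } while_each_thread(current, tsk);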