#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */
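
/*
 * Illustrative only (not part of the definitions above): a userspace
 * thread library combines these flags roughly the way glibc's
 * pthread_create() does, e.g.
 *
 *	clone(fn, child_stack,
 *	      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *	      CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *	      CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, arg, ...);
 */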

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK     0x40000000
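
/*
 * Sketch (in-kernel API): a task can run real-time without passing the
 * policy on to its children by ORing in the reset flag:
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	sched_setscheduler(current, SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
 *
 * Children forked afterwards start back at SCHED_NORMAL.
 */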

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
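
/*
 * Sketch only (illustrative call site; some_kthread_fn is hypothetical):
 * a boot-time kernel thread could be spawned with this shared set, e.g.
 *
 *	kernel_thread(some_kthread_fn, NULL, CLONE_KERNEL);
 */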

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
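
/*
 * Worked example (a sketch of calc_global_load()-style use, where
 * nr_active is the sampled count of runnable + uninterruptible tasks):
 *
 *	unsigned long active = nr_active * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);	(1-minute average)
 *	CALC_LOAD(avenrun[1], EXP_5, active);	(5-minute average)
 *	CALC_LOAD(avenrun[2], EXP_15, active);	(15-minute average)
 */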

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
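
/*
 * The assert above works out as: TASK_STATE_MAX == 512 means the states
 * span ilog2(512) + 1 == 10 bits, and "RSDTtZXxKW" supplies exactly
 * sizeof(TASK_STATE_TO_CHAR_STR) - 1 == 10 characters, one per bit.
 */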

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
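
/*
 * Canonical wait loop built on the above (a sketch; most code should use
 * wait_event() and friends instead; condition_is_true() is a placeholder
 * for the caller's wakeup condition):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition_is_true())
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */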

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned
 * short. This means the number of sections must be smaller than 65535 at
 * coredump. Because the kernel adds some informative sections to the image
 * of a program when generating a coredump, we need some margin. The number
 * of extra sections is currently 1-3 and depends on the arch. We use "5" as
 * a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
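
/* With USHRT_MAX == 65535 this works out to a default of 65530 map areas. */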

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
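
/*
 * Worked out: as exposed via /proc/<pid>/coredump_filter (i.e. shifted down
 * by MMF_DUMP_FILTER_SHIFT), MMF_DUMP_FILTER_DEFAULT is bits 0, 1 and 5,
 * i.e. 0x23, or 0x33 when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS adds bit 4.
 */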

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;
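
	/*
	 * Userspace opts in via prctl(), e.g. a service manager running:
	 *
	 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
	 *
	 * after which orphaned descendants are re-parented to it rather
	 * than to init.
	 */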

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;
	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, or more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
				 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
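
/*
 * Example: with SCHED_LOAD_RESOLUTION == 10, the nice-0 load weight of 1024
 * is stored as scale_load(1024) == 1048576 and converted back with
 * scale_load_down(); with a resolution of 0 both macros are the identity.
 */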

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	if (!sched_mc_power_savings)
		return SD_PREFER_SIBLING;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

extern int __weak arch_sd_sibiling_asym_packing(void);

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};
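
/*
 * Allocation sketch for the flexible cpumask[] member above (the real
 * allocation happens in kernel/sched/; this is only illustrative):
 *
 *	sg = kzalloc(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL);
 */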

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x04		/* internal use, task got migrated */

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)
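
/* e.g. with HZ == 1000 this is 100 jiffies; with HZ == 250 it is 25. */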

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU was used. If this is over a threshold, the
	 * lazy fpu saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;


	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	unsigned irq_thread:1;
#endif

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_t ptrace_bp_refcnt;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
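
/*
 * Illustrative sketch, not part of the kernel's interface: how a nice
 * value in -20..19 lands in the SCHED_NORMAL range defined above.
 * The helper name is hypothetical, for illustration only.
 */
static inline int example_nice_to_prio(long nice)
{
	/* nice -20 -> MAX_RT_PRIO (100), 0 -> DEFAULT_PRIO (120),
	 * 19 -> MAX_PRIO - 1 (139) */
	return MAX_RT_PRIO + (int)nice + 20;
}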

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
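
/*
 * Usage sketch (hypothetical helper, not part of the kernel API):
 * compare a task's global id with the id seen inside current's pid
 * namespace. A container's init typically has task_pid_vnr() == 1
 * while task_pid_nr() is some larger global id.
 */
static inline void example_report_pids(struct task_struct *tsk)
{
	pid_t gpid = task_pid_nr(tsk);	/* as seen from the init namespace */
	pid_t vpid = task_pid_vnr(tsk);	/* as seen from current's namespace */

	printk(KERN_DEBUG "global pid %d, virtual pid %d\n", gpid, vpid);
}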

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
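
/*
 * Reference-counting sketch (hypothetical helper): code that stores a
 * task pointer beyond the current critical section must take a
 * reference with get_task_struct() and later drop it with
 * put_task_struct(), or the task_struct may be freed underneath it.
 */
static inline struct task_struct *example_grab_task(struct task_struct *t)
{
	get_task_struct(t);	/* pin t->usage */
	return t;		/* caller must eventually put_task_struct(t) */
}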

extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);

/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
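
/*
 * Flags sketch (hypothetical helper): per the rule above, arbitrary
 * contexts may only *read* another task's flags; writes are reserved
 * for the task itself (or its ptracer/parent in the cases noted).
 */
static inline int example_task_is_kthread(struct task_struct *tsk)
{
	return (tsk->flags & PF_KTHREAD) != 0;	/* read-only test, safe */
}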

/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned int mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned int mask);

#ifdef CONFIG_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ */

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif
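
/*
 * Affinity sketch (hypothetical helper): restrict a task to a single
 * CPU via the interface above. cpumask_of() comes from
 * <linux/cpumask.h>; on !SMP the inline fallback only accepts masks
 * containing CPU 0.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}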

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and its use (which you should not attempt) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched_clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);
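
/*
 * Timing sketch (hypothetical helper): local_clock() is the cheap
 * choice for short, same-CPU measurements; cpu_clock() reads the clock
 * of a named CPU; raw sched_clock() is for architecture code only, as
 * the comment above warns.
 */
static inline u64 example_time_call(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;	/* elapsed nanoseconds, same CPU */
}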

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface to opt in at runtime to IRQ time accounting based on
 * sched_clock. The explicit opt-in avoids a performance penalty on
 * systems with slow sched_clock implementations.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
extern void wake_up_idle_cpu(int cpu);
#else
static inline void wake_up_idle_cpu(int cpu) { }
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;

extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return tsk->pi_blocked_on != NULL;
}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
	return false;
}
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
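
/*
 * Policy sketch (hypothetical helper): give a kernel thread a
 * mid-range FIFO priority. The _nocheck variant skips the capability
 * and rlimit checks and is meant for in-kernel callers only.
 */
static inline int example_make_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_USER_RT_PRIO / 2 };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp);
}
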
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct   mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern void __set_special_pids(struct pid *pid);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);
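
/*
 * mm-reference sketch (hypothetical helper): get_task_mm() returns
 * NULL for kernel threads and for tasks whose mm is exiting; a
 * non-NULL result must be balanced with mmput().
 */
static inline unsigned long example_task_total_vm(struct task_struct *tsk)
{
	struct mm_struct *mm = get_task_mm(tsk);
	unsigned long total_vm = 0;

	if (mm) {
		total_vm = mm->total_vm;	/* safe: reference held */
		mmput(mm);
	}
	return total_vm;
}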

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

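
/*
 * Iteration sketch (hypothetical helper): walk every thread in the
 * system under rcu_read_lock() or tasklist_lock. As the comment above
 * warns, 'break' cannot leave the double loop, hence the goto.
 */
static inline struct task_struct *example_find_by_comm(const char *comm)
{
	struct task_struct *g, *t;

	do_each_thread(g, t) {
		if (!strcmp(t->comm, comm))
			goto out;
	} while_each_thread(g, t);
	t = NULL;
out:
	return t;	/* only valid while the caller's lock is held */
}
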
static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
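
/*
 * Locking sketch (hypothetical helper): ->comm is among the fields
 * protected by alloc_lock, so copy it under task_lock(), which is
 * what get_task_comm() does internally.
 */
static inline void example_copy_comm(char *buf, struct task_struct *tsk)
{
	task_lock(tsk);
	/* buf is assumed to hold at least sizeof(tsk->comm) bytes */
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}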

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
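
/*
 * Sighand sketch (hypothetical helper): lock_task_sighand() returns
 * NULL when the task is already dead, so the result must be checked
 * before touching signal state or unlocking.
 */
static inline int example_has_queued_signal(struct task_struct *tsk)
{
	unsigned long flags;
	int queued = 0;

	if (lock_task_sighand(tsk, &flags)) {
		queued = !sigisemptyset(&tsk->pending.signal);
		unlock_task_sighand(tsk, &flags);
	}
	return queued;
}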

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.  While held, no new task will be added to threadgroup
 * and no existing live task will have its PF_EXITING set.
 *
 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
 * sub-thread becomes a new leader.
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
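
/*
 * Wait-loop sketch (hypothetical helper): the canonical interruptible
 * sleep sets the task state before re-testing its condition and bails
 * out on a pending signal, which is exactly the combination
 * signal_pending_state() evaluates for the scheduler.
 */
static inline int example_wait_event(int (*condition)(void))
{
	int ret = 0;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!condition()) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}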

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
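
/*
 * Latency sketch (hypothetical helper): long loops in process context
 * should drop in voluntary preemption points so they cannot monopolize
 * a CPU on non-preemptible kernels. No locks may be held here.
 */
static inline void example_process_items(void (*one)(int), int n)
{
	int i;

	for (i = 0; i < n; i++) {
		one(i);
		cond_resched();	/* may sleep */
	}
}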

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_CGROUP_SCHED

extern struct task_group root_task_group;

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				      long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
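
/*
 * Rlimit sketch (hypothetical helper): check a proposed stack size
 * against the caller's current RLIMIT_STACK soft limit (RLIMIT_STACK
 * comes from <linux/resource.h>).
 */
static inline int example_stack_size_ok(unsigned long size)
{
	return size <= rlimit(RLIMIT_STACK);
}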

#endif /* __KERNEL__ */

#endif