Lines Matching refs:pct

24 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)  in posix_cputimers_group_init()  argument
26 posix_cputimers_init(pct); in posix_cputimers_group_init()
28 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC; in posix_cputimers_group_init()
29 pct->timers_active = true; in posix_cputimers_group_init()
152 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct) in expiry_cache_is_inactive() argument
154 return !(~pct->bases[CPUCLOCK_PROF].nextevt | in expiry_cache_is_inactive()
155 ~pct->bases[CPUCLOCK_VIRT].nextevt | in expiry_cache_is_inactive()
156 ~pct->bases[CPUCLOCK_SCHED].nextevt); in expiry_cache_is_inactive()
276 struct posix_cputimers *pct = &tsk->signal->posix_cputimers; in thread_group_sample_cputime() local
278 WARN_ON_ONCE(!pct->timers_active); in thread_group_sample_cputime()
298 struct posix_cputimers *pct = &tsk->signal->posix_cputimers; in thread_group_start_cputime() local
303 if (!READ_ONCE(pct->timers_active)) { in thread_group_start_cputime()
321 WRITE_ONCE(pct->timers_active, true); in thread_group_start_cputime()
343 struct posix_cputimers *pct = &p->signal->posix_cputimers; in cpu_clock_sample_group() local
346 if (!READ_ONCE(pct->timers_active)) { in cpu_clock_sample_group()
531 static void cleanup_timers(struct posix_cputimers *pct) in cleanup_timers() argument
533 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead); in cleanup_timers()
534 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead); in cleanup_timers()
535 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead); in cleanup_timers()
858 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, in collect_posix_cputimers() argument
861 struct posix_cputimer_base *base = pct->bases; in collect_posix_cputimers()
900 struct posix_cputimers *pct = &tsk->posix_cputimers; in check_thread_timers() local
907 if (expiry_cache_is_inactive(pct)) in check_thread_timers()
911 collect_posix_cputimers(pct, samples, firing); in check_thread_timers()
934 if (expiry_cache_is_inactive(pct)) in check_thread_timers()
940 struct posix_cputimers *pct = &sig->posix_cputimers; in stop_process_timers() local
943 WRITE_ONCE(pct->timers_active, false); in stop_process_timers()
978 struct posix_cputimers *pct = &sig->posix_cputimers; in check_process_timers() local
987 if (!READ_ONCE(pct->timers_active) || pct->expiry_active) in check_process_timers()
994 pct->expiry_active = true; in check_process_timers()
1001 collect_posix_cputimers(pct, samples, firing); in check_process_timers()
1007 &pct->bases[CPUCLOCK_PROF].nextevt, in check_process_timers()
1010 &pct->bases[CPUCLOCK_VIRT].nextevt, in check_process_timers()
1033 if (softns < pct->bases[CPUCLOCK_PROF].nextevt) in check_process_timers()
1034 pct->bases[CPUCLOCK_PROF].nextevt = softns; in check_process_timers()
1037 if (expiry_cache_is_inactive(pct)) in check_process_timers()
1040 pct->expiry_active = false; in check_process_timers()
1094 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct) in task_cputimers_expired() argument
1099 if (samples[i] >= pct->bases[i].nextevt) in task_cputimers_expired()
1117 struct posix_cputimers *pct = &tsk->posix_cputimers; in fastpath_timer_check() local
1120 if (!expiry_cache_is_inactive(pct)) { in fastpath_timer_check()
1124 if (task_cputimers_expired(samples, pct)) in fastpath_timer_check()
1129 pct = &sig->posix_cputimers; in fastpath_timer_check()
1145 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) { in fastpath_timer_check()
1151 if (task_cputimers_expired(samples, pct)) in fastpath_timer_check()
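
All of the references above operate on the same small expiry-cache structure. The sketch below is a userspace stand-in, not the kernel definition: the field names (bases[], nextevt, timers_active, expiry_active) and the expiry-cache test are taken straight from the listing, while the stand-in types, the CPUCLOCK_* index values, the U64_MAX "disarmed" sentinel (set by posix_cputimers_init(), which is called on line 26 but not itself shown here), and the 5-second limit are assumptions added for illustration.

```c
/*
 * Userspace sketch of the expiry cache referenced as "pct" above.
 * Field names mirror the listing; types and constants are assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPUCLOCK_PROF	0
#define CPUCLOCK_VIRT	1
#define CPUCLOCK_SCHED	2
#define CPUCLOCK_MAX	3

#define NSEC_PER_SEC	1000000000ULL

struct posix_cputimer_base {
	uint64_t nextevt;	/* earliest cached expiry; UINT64_MAX == disarmed */
};

struct posix_cputimers {
	struct posix_cputimer_base bases[CPUCLOCK_MAX];
	bool timers_active;	/* timers are queued (process-wide case) */
	bool expiry_active;	/* expiry currently being handled */
};

/* Rough equivalent of posix_cputimers_init(): every cache starts disarmed. */
static void sketch_init(struct posix_cputimers *pct)
{
	for (int i = 0; i < CPUCLOCK_MAX; i++)
		pct->bases[i].nextevt = UINT64_MAX;
	pct->timers_active = false;
	pct->expiry_active = false;
}

/*
 * Mirror of expiry_cache_is_inactive() from lines 152-156: ~x is zero only
 * when x == UINT64_MAX, so OR-ing the complements is non-zero as soon as
 * any of the three clocks has a real (armed) expiry cached.
 */
static bool sketch_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

int main(void)
{
	struct posix_cputimers pct;

	sketch_init(&pct);
	printf("after init: inactive=%d\n", sketch_inactive(&pct));	/* 1 */

	/* Rough equivalent of posix_cputimers_group_init() arming RLIMIT_CPU. */
	pct.bases[CPUCLOCK_PROF].nextevt = 5 * NSEC_PER_SEC;
	pct.timers_active = true;
	printf("after group init: inactive=%d\n", sketch_inactive(&pct));	/* 0 */

	return 0;
}
```

The complement trick depends on the sentinel: ~UINT64_MAX is 0, so the expression in expiry_cache_is_inactive() evaluates to "inactive" exactly when none of the three clocks has a cached expiry, which is what the check_thread_timers(), check_process_timers(), and fastpath_timer_check() references test before doing any further sampling.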