/* Helper code for POSIX timer implementation on NPTL.
   Copyright (C) 2000-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sysdep.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#include "posix-timer.h"
#include <timer_routines.h>

#ifndef DELAYTIMER_MAX
# define DELAYTIMER_MAX INT_MAX
#endif
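
/* DELAYTIMER_MAX bounds the overrun count kept for periodic timers;
   thread_func below stops incrementing overrun_count once it reaches
   this value.  */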

/* Number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;

/* Lists to keep free and used timers and threads.  */
static struct list_head timer_free_list;
static struct list_head thread_free_list;
static struct list_head thread_active_list;


#ifdef __NR_rt_sigqueueinfo
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif


/* List handling functions.  */
static inline void
list_append (struct list_head *list, struct list_head *newp)
{
  newp->prev = list->prev;
  newp->next = list;
  list->prev->next = newp;
  list->prev = newp;
}

static inline void
list_insbefore (struct list_head *list, struct list_head *newp)
{
  list_append (list, newp);
}

/*
 * Like list_unlink_ip, except that calling it on a node that
 * is already unlinked is disastrous rather than a noop.
 */

static inline void
list_unlink (struct list_head *list)
{
  struct list_head *lnext = list->next, *lprev = list->prev;

  lnext->prev = lprev;
  lprev->next = lnext;
}

static inline struct list_head *
list_first (struct list_head *list)
{
  return list->next;
}

static inline struct list_head *
list_null (struct list_head *list)
{
  return list;
}

static inline struct list_head *
list_next (struct list_head *list)
{
  return list->next;
}

static inline int
list_isempty (struct list_head *list)
{
  return list->next == list;
}
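
/* Note for illustration: these are intrusive, circular, doubly linked
   lists.  An empty list is a head node pointing to itself, which is
   why list_isempty compares list->next with the list head, and why a
   typical traversal (a sketch, mirroring the loops below) stops when
   the cursor reaches the head sentinel returned by list_null:

     for (iter = list_first (head); iter != list_null (head);
          iter = list_next (iter))
       ...

   list_append inserts just before the head, i.e. at the tail.  */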


/* Functions built on top of the list functions.  */
static inline struct thread_node *
thread_links2ptr (struct list_head *list)
{
  return (struct thread_node *) ((char *) list
                                 - offsetof (struct thread_node, links));
}

static inline struct timer_node *
timer_links2ptr (struct list_head *list)
{
  return (struct timer_node *) ((char *) list
                                - offsetof (struct timer_node, links));
}
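
/* Note for illustration: the links2ptr helpers are the usual
   "container of" idiom; given a pointer to the embedded `links'
   member, they recover the enclosing thread_node or timer_node by
   subtracting the member's offset within the structure.  */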


/* Initialize a newly allocated thread structure.  */
static void
thread_init (struct thread_node *thread, const pthread_attr_t *attr,
             clockid_t clock_id)
{
  if (attr != NULL)
    thread->attr = *attr;
  else
    {
      pthread_attr_init (&thread->attr);
      pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
    }

  thread->exists = 0;
  INIT_LIST_HEAD (&thread->timer_queue);
  pthread_cond_init (&thread->cond, 0);
  thread->current_timer = 0;
  thread->captured = pthread_self ();
  thread->clock_id = clock_id;
}


/* Initialize the global lists, and acquire global resources.  Error
   reporting is done by storing a non-zero value to the global variable
   __timer_init_failed.  */
static void
init_module (void)
{
  int i;

  INIT_LIST_HEAD (&timer_free_list);
  INIT_LIST_HEAD (&thread_free_list);
  INIT_LIST_HEAD (&thread_active_list);

  for (i = 0; i < TIMER_MAX; ++i)
    {
      list_append (&timer_free_list, &__timer_array[i].links);
      __timer_array[i].inuse = TIMER_FREE;
    }

  for (i = 0; i < THREAD_MAXNODES; ++i)
    list_append (&thread_free_list, &thread_array[i].links);

  thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
}


/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}


/* Called once from pthread_once in timer_init.  This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
  pthread_atfork (0, 0, reinit_after_fork);
}


/* Deinitialize a thread that is about to be deallocated.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}


/* Allocate a thread structure from the global free list.  Global
   mutex lock must be held by caller.  The thread is moved to
   the active list.  */
struct thread_node *
__timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
{
  struct list_head *node = list_first (&thread_free_list);

  if (node != list_null (&thread_free_list))
    {
      struct thread_node *thread = thread_links2ptr (node);
      list_unlink (node);
      thread_init (thread, desired_attr, clock_id);
      list_append (&thread_active_list, node);
      return thread;
    }

  return 0;
}


/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  thread_deinit (thread);
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}


/* Each of our threads which terminates executes this cleanup
   handler.  We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */

static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      if (list_isempty (&thread->timer_queue))
        __timer_thread_dealloc (thread);
      else
        (void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      pthread_cond_broadcast (&thread->cond);
    }
}


/* Handle a timer which is supposed to go off now.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running.  */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
        siginfo_t info;

        /* First, clear the siginfo_t structure, so that we don't pass our
           stack content to other tasks.  */
        memset (&info, 0, sizeof (siginfo_t));
        /* We must pass the information about the data in a siginfo_t
           value.  */
        info.si_signo = timer->event.sigev_signo;
        info.si_code = SI_TIMER;
        info.si_pid = timer->creator_pid;
        info.si_uid = getuid ();
        info.si_value = timer->event.sigev_value;

        INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
        {
          if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
            abort ();
        }
#endif
      break;

    case SIGEV_THREAD:
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  pthread_cond_broadcast (&self->cond);
}
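
/* Note for illustration: __timer_mutex is dropped for the duration of
   the notification above, so neither the rt_sigqueueinfo call nor a
   SIGEV_THREAD callback runs with the global lock held.  The
   current_timer field and the broadcast on self->cond form the
   handshake that lets timer_delete wait for an in-flight expiry.  */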


/* Thread function; executed by each timer thread.  The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  */
static void
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks).  */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_head *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
        {
          struct timespec now;

          timer = timer_links2ptr (first);

          /* This assumes that the elements of the list of one thread
             are all for the same clock.  */
          __clock_gettime (timer->clock, &now);

          while (1)
            {
              /* If the timer is due or overdue, remove it from the queue.
                 If it's a periodic timer, re-compute its new time and
                 requeue it.  Either way, perform the timer expiry.  */
              if (timespec_compare (&now, &timer->expirytime) < 0)
                break;

              list_unlink_ip (first);

              if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
                  || timer->value.it_interval.tv_nsec != 0)
                {
                  timer->overrun_count = 0;
                  timespec_add (&timer->expirytime, &timer->expirytime,
                                &timer->value.it_interval);
                  while (timespec_compare (&timer->expirytime, &now) < 0)
                    {
                      timespec_add (&timer->expirytime, &timer->expirytime,
                                    &timer->value.it_interval);
                      if (timer->overrun_count < DELAYTIMER_MAX)
                        ++timer->overrun_count;
                    }
                  __timer_thread_queue_timer (self, timer);
                }

              thread_expire_timer (self, timer);

              first = list_first (&self->timer_queue);
              if (first == list_null (&self->timer_queue))
                break;

              timer = timer_links2ptr (first);
            }
        }

      /* If the queue is not empty, wait until the expiry time of the
         first node.  Otherwise wait indefinitely.  Insertions at the
         head of the queue must wake up the thread by broadcasting
         this condition variable.  */
      if (timer != NULL)
        pthread_cond_timedwait (&self->cond, &__timer_mutex,
                                &timer->expirytime);
      else
        pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);
}


/* Enqueue a timer in wakeup order in the thread's timer queue.
   Returns 1 if the timer was inserted at the head of the queue,
   causing the queue's next wakeup time to change.  */

int
__timer_thread_queue_timer (struct thread_node *thread,
                            struct timer_node *insert)
{
  struct list_head *iter;
  int athead = 1;

  for (iter = list_first (&thread->timer_queue);
       iter != list_null (&thread->timer_queue);
       iter = list_next (iter))
    {
      struct timer_node *timer = timer_links2ptr (iter);

      if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
        break;
      athead = 0;
    }

  list_insbefore (iter, &insert->links);
  return athead;
}
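
/* Sketch of the intended caller pattern (hypothetical; the real call
   sites are elsewhere in the timer code): a nonzero return means the
   queue's earliest expiry changed, so the servicing thread must be
   woken to recompute its wait deadline:

     if (__timer_thread_queue_timer (thread, timer))
       __timer_thread_wakeup (thread);
*/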


/* Start a thread and associate it with the given thread node.  Global
   lock must be held by caller.  */
int
__timer_thread_start (struct thread_node *thread)
{
  int retval = 1;
  sigset_t set, oset;

  assert (!thread->exists);
  thread->exists = 1;

  /* Block all signals around pthread_create so that the new thread
     starts with every signal blocked and never runs application
     signal handlers; the caller's mask is restored afterwards.  */
  sigfillset (&set);
  pthread_sigmask (SIG_SETMASK, &set, &oset);

  if (pthread_create (&thread->id, &thread->attr,
                      (void *(*) (void *)) thread_func, thread) != 0)
    {
      thread->exists = 0;
      retval = -1;
    }

  pthread_sigmask (SIG_SETMASK, &oset, NULL);

  return retval;
}


void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}



/* Search the list of active threads and find one which has matching
   attributes.  Global mutex lock must be held by caller.  */
struct thread_node *
__timer_thread_find_matching (const pthread_attr_t *desired_attr,
                              clockid_t desired_clock_id)
{
  struct list_head *iter = list_first (&thread_active_list);

  while (iter != list_null (&thread_active_list))
    {
      struct thread_node *candidate = thread_links2ptr (iter);

      if (thread_attr_compare (desired_attr, &candidate->attr)
          && desired_clock_id == candidate->clock_id)
        return candidate;

      iter = list_next (iter);
    }

  return NULL;
}


/* Grab a free timer structure from the global free list.  The global
   lock must be held by the caller.  */
struct timer_node *
__timer_alloc (void)
{
  struct list_head *node = list_first (&timer_free_list);

  if (node != list_null (&timer_free_list))
    {
      struct timer_node *timer = timer_links2ptr (node);
      list_unlink_ip (node);
      timer->inuse = TIMER_INUSE;
      timer->refcount = 1;
      return timer;
    }

  return NULL;
}


/* Return a timer structure to the global free list.  The global lock
   must be held by the caller.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL; /* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}
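
/* Note for illustration: the lifetime protocol implied above is that a
   timer leaves __timer_alloc with refcount == 1, callers drop their
   references as they finish with the node, and only a node whose
   refcount has reached zero may be passed to __timer_dealloc (the
   assert above enforces this).  */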


/* Thread cancellation handler which unlocks a mutex.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}
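
/* Sketch of the intended usage (hypothetical; the real call sites are
   elsewhere in the timer code): the handler is registered with
   pthread_cleanup_push around a region that holds __timer_mutex, so
   the lock is released even if the thread is cancelled inside it:

     pthread_cleanup_push (__timer_mutex_cancel_handler, &__timer_mutex);
     pthread_mutex_lock (&__timer_mutex);
     ...
     pthread_mutex_unlock (&__timer_mutex);
     pthread_cleanup_pop (0);
*/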