/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <libc-lock.h>
#include <stdbool.h>
#include <register-atfork.h>
#include <intprops.h>
#include <stdio.h>

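/* Instantiate the dynarray skeleton: this defines 'struct fork_handler_list'
   (a dynamic array of 'struct fork_handler') together with accessor
   functions prefixed 'fork_handler_list_'.  */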
#define DYNARRAY_ELEMENT struct fork_handler
#define DYNARRAY_STRUCT fork_handler_list
#define DYNARRAY_PREFIX fork_handler_list_
#define DYNARRAY_INITIAL_SIZE 48
#include <malloc/dynarray-skeleton.c>
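/* All registered fork handlers, in registration order.  */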
static struct fork_handler_list fork_handlers;
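/* Monotonically increasing counter used to assign handler IDs; IDs of
   unregistered handlers are not reused (see __register_atfork below).  */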
static uint64_t fork_handler_counter;

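/* Lock protecting fork_handlers and fork_handler_counter.  */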
static int atfork_lock = LLL_LOCK_INITIALIZER;

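/* Register PREPARE, PARENT and CHILD to be run around fork.  DSO_HANDLE
   identifies the object on whose behalf the handlers are registered, so
   __unregister_atfork can remove them again.  Returns 0 on success or
   ENOMEM if the handler list cannot be grown.  */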
int
__register_atfork (void (*prepare) (void), void (*parent) (void),
                   void (*child) (void), void *dso_handle)
{
  lll_lock (atfork_lock, LLL_PRIVATE);

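  /* The handler list is initialized lazily, on the first registration.  */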
  if (fork_handler_counter == 0)
    fork_handler_list_init (&fork_handlers);

  struct fork_handler *newp = fork_handler_list_emplace (&fork_handlers);
  if (newp != NULL)
    {
      newp->prepare_handler = prepare;
      newp->parent_handler = parent;
      newp->child_handler = child;
      newp->dso_handle = dso_handle;

      /* IDs assigned to handlers start at 1 and increment with handler
         registration.  Un-registering a handler discards the corresponding
         ID.  It is not reused in future registrations.  */
      if (INT_ADD_OVERFLOW (fork_handler_counter, 1))
        __libc_fatal ("fork handler counter overflow");
      newp->id = ++fork_handler_counter;
    }

  /* Release the lock.  */
  lll_unlock (atfork_lock, LLL_PRIVATE);

  return newp == NULL ? ENOMEM : 0;
}
libc_hidden_def (__register_atfork)
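/* Return the first handler in *FORK_HANDLERS that was registered with
   DSO_HANDLE, or NULL if there is none.  */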
static struct fork_handler *
fork_handler_list_find (struct fork_handler_list *fork_handlers,
                        void *dso_handle)
{
  for (size_t i = 0; i < fork_handler_list_size (fork_handlers); i++)
    {
      struct fork_handler *elem = fork_handler_list_at (fork_handlers, i);
      if (elem->dso_handle == dso_handle)
        return elem;
    }
  return NULL;
}
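/* Remove every handler that was registered with DSO_HANDLE from the
   list of fork handlers.  */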
void
__unregister_atfork (void *dso_handle)
{
  lll_lock (atfork_lock, LLL_PRIVATE);

  struct fork_handler *first = fork_handler_list_find (&fork_handlers,
                                                       dso_handle);
  /* Removal is done by shifting the elements, so that the elements that
     are to be kept end up at the beginning of the dynarray.  This avoids
     the quadratic run-time of the naive strategy of removing and shifting
     one element at a time.  */
  if (first != NULL)
    {
      struct fork_handler *new_end = first;
      first++;
      for (; first != fork_handler_list_end (&fork_handlers); ++first)
        {
          if (first->dso_handle != dso_handle)
            {
              *new_end = *first;
              ++new_end;
            }
        }

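      /* Drop the elements that were shifted out at the tail.  */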
      ptrdiff_t removed = first - new_end;
      for (size_t i = 0; i < removed; i++)
        fork_handler_list_remove_last (&fork_handlers);
    }

  lll_unlock (atfork_lock, LLL_PRIVATE);
}

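/* Run the prepare handlers in the reverse of registration order.  If
   DO_LOCKING, atfork_lock is held across the walk but released while each
   handler runs.  Returns the ID of the last handler registered before the
   walk started; after fork, only handlers with IDs up to this value have
   their parent/child handlers run.  */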
uint64_t
__run_prefork_handlers (_Bool do_locking)
{
  uint64_t lastrun;

  if (do_locking)
    lll_lock (atfork_lock, LLL_PRIVATE);

  /* We run prepare handlers from last to first.  After fork, only
     handlers up to the last handler found here (pre-fork) will be run.
     Handlers registered during __run_prefork_handlers or
     __run_postfork_handlers will be positioned after this last handler, and
     since their prepare handlers won't be run now, their parent/child
     handlers should also be ignored.  */
  lastrun = fork_handler_counter;

  size_t sl = fork_handler_list_size (&fork_handlers);
  for (size_t i = sl; i > 0;)
    {
      struct fork_handler *runp
        = fork_handler_list_at (&fork_handlers, i - 1);

      uint64_t id = runp->id;

      if (runp->prepare_handler != NULL)
        {
          if (do_locking)
            lll_unlock (atfork_lock, LLL_PRIVATE);

          runp->prepare_handler ();

          if (do_locking)
            lll_lock (atfork_lock, LLL_PRIVATE);
        }

      /* We unlocked, ran the handler, and locked again.  In the meantime,
         one or more deregistrations could have occurred, leading to the
         current (just run) handler being moved up the list or even removed
         from the list altogether.  Since handler IDs are guaranteed to be
         in increasing order, the next handler has to have: */

      /* A. An earlier position than the current one has.  */
      i--;

      /* B. A lower ID than the current one does.  The code below skips
         any newly added handlers with higher IDs.  */
      while (i > 0
             && fork_handler_list_at (&fork_handlers, i - 1)->id >= id)
        i--;
    }

  return lastrun;
}

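/* Run the parent or child handlers (selected by WHO) in registration
   order, for every handler whose prepare handler was run before fork,
   i.e. every handler with ID <= LASTRUN (the value returned by
   __run_prefork_handlers).  */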
void
__run_postfork_handlers (enum __run_fork_handler_type who, _Bool do_locking,
                         uint64_t lastrun)
{
  size_t sl = fork_handler_list_size (&fork_handlers);
  for (size_t i = 0; i < sl;)
    {
      struct fork_handler *runp = fork_handler_list_at (&fork_handlers, i);
      uint64_t id = runp->id;

      /* prepare handlers were not run for handlers with ID > LASTRUN.
         Thus, parent/child handlers will also not be run.  */
      if (id > lastrun)
        break;

      if (do_locking)
        lll_unlock (atfork_lock, LLL_PRIVATE);

      if (who == atfork_run_child && runp->child_handler)
        runp->child_handler ();
      else if (who == atfork_run_parent && runp->parent_handler)
        runp->parent_handler ();

      if (do_locking)
        lll_lock (atfork_lock, LLL_PRIVATE);

      /* We unlocked, ran the handler, and locked again.  In the meantime,
         one or more [de]registrations could have occurred.  Due to this,
         the list size must be updated.  */
      sl = fork_handler_list_size (&fork_handlers);

      /* The just-run handler could also have moved up the list.  */

      if (sl > i && fork_handler_list_at (&fork_handlers, i)->id == id)
        /* The position of the recently run handler hasn't changed.  The
           next handler to be run is an easy increment away.  */
        i++;
      else
        {
          /* The next handler to be run is the first handler in the list
             to have an ID higher than the current one.  */
          for (i = 0; i < sl; i++)
            {
              if (fork_handler_list_at (&fork_handlers, i)->id > id)
                break;
            }
        }
    }

  if (do_locking)
    lll_unlock (atfork_lock, LLL_PRIVATE);
}


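/* Free the handler list when libc resources are freed (via __libc_freeres).  */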
libc_freeres_fn (free_mem)
{
  lll_lock (atfork_lock, LLL_PRIVATE);

  fork_handler_list_free (&fork_handlers);

  lll_unlock (atfork_lock, LLL_PRIVATE);
}