/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _DESCR_H
#define _DESCR_H	1

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#include <list_t.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include <thread_db.h>
#include <tls.h>
#include <unwind.h>
#include <bits/types/res_state.h>
#include <kernel-features.h>
#include <tls-internal-struct.h>
#include <sys/rseq.h>
#include <internal-sigset.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT 32
#elif TCB_ALIGNMENT < 32
# error TCB_ALIGNMENT must be at least 32
#endif


/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  So we can implement a
   sparse array.  Each dynamic second-level array has
        PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE       32

/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
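
/* Illustrative sketch (hypothetical helpers, not part of the
   implementation): how a key number is split into the two indices used
   with the specific/specific_1stblock members of struct pthread below.
   The thread-specific data lookup code performs the same division and
   remainder.  */
static inline unsigned int
__example_key_1stlevel_idx (unsigned int key)
{
  /* Which second-level block the key lives in.  */
  return key / PTHREAD_KEY_2NDLEVEL_SIZE;
}

static inline unsigned int
__example_key_2ndlevel_idx (unsigned int key)
{
  /* Slot within that second-level block.  */
  return key % PTHREAD_KEY_2NDLEVEL_SIZE;
}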




/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
         handler at the time of the previous new-style cleanup handler
         installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};
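
/* Illustrative sketch (hypothetical helper, not part of the
   implementation): how unwind buffers form a chain through the
   priv.data.prev pointer.  Conceptually, pushing a new cancellation
   handler links a fresh buffer in front of the current head, which in
   the real code is the calling thread's cleanup_jmp_buf field.  */
static inline void
__example_link_unwind_buf (struct pthread_unwind_buf *buf,
                           struct pthread_unwind_buf **head)
{
  buf->priv.data.prev = *head;  /* Remember the previous top of the chain.  */
  *head = buf;                  /* The new buffer becomes the top.  */
}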


/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  /* Enforce zero-extension for the pointer argument in

     int setgroups (size_t size, const gid_t *list);

     The kernel XID arguments are unsigned and do not require sign
     extension.  */
  unsigned long int id[3];
  volatile int cntr;
  volatile int error; /* -1: no call yet, 0: success seen, >0: error seen.  */
};
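
/* Illustrative sketch (hypothetical helper, not part of the
   implementation): filling in an xid_command for a call such as
   setgroups.  Casting the pointer argument through uintptr_t before
   storing it in the unsigned long int slot gives the zero-extension
   described in the comment above; picking the syscall number is left to
   the caller.  */
static inline void
__example_fill_xid_command (struct xid_command *cmd, int syscall_no,
                            size_t size, const gid_t *list)
{
  cmd->syscall_no = syscall_no;
  cmd->id[0] = size;
  cmd->id[1] = (uintptr_t) list;  /* Zero-extended, never sign-extended.  */
  cmd->id[2] = 0;
  cmd->cntr = 0;
  cmd->error = -1;                /* No call has been made yet.  */
}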


/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};


/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};
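
/* Illustrative sketch (hypothetical helper, not part of the
   implementation): initializing a priority_protection_data object in
   caller-provided storage.  Because priomap is a flexible array member,
   the storage must be at least sizeof (struct priority_protection_data)
   + prio_max * sizeof (unsigned int) bytes; the real allocation and use
   live in the priority-protection mutex code.  */
static inline struct priority_protection_data *
__example_init_tpp (void *mem, int prio_max)
{
  struct priority_protection_data *tpp = mem;
  tpp->priomax = 0;
  for (int i = 0; i < prio_max; ++i)
    tpp->priomap[i] = 0;        /* Clear the per-priority counters.  */
  return tpp;
}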


/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
         least one thread or when a single-threaded process cancels itself.
         This enables additional code to introduce locking before doing some
         compare_and_exchange operations and also enables cancellation points.
         The concepts of multiple threads and cancellation points ideally
         should be separate, since it is not necessary for multiple threads to
         have been created for cancellation points to be enabled, as is the
         case when a single-threaded process cancels itself.

         Since enabling multiple_threads enables additional code in
         cancellation points and compare_and_exchange operations, there is a
         potential for an unneeded performance hit when it is enabled in a
         single-threaded, self-canceling process.  This is OK though, since a
         single-threaded process will enable async cancellation only when it
         looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
    } header;
#endif

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the GL (dl_stack_used) or
     GL (dl_stack_user) list.  */
  list_t list;

  /* Thread ID - which is also an 'is this thread descriptor (and
     therefore stack) used' flag.  */
  pid_t tid;

  /* List of robust mutexes the thread is holding.  */
#if __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.  (An illustrative sketch of this pointer adjustment follows
     the definition of struct pthread below.)
     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (ie, the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val)					      \
  do {									      \
    __pthread_list_t *next = (__pthread_list_t *)			      \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul)  \
       - QUEUE_PTR_ADJUST);						      \
    next->__prev = (void *) &mutex->__data.__list.__next;		      \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF,		      \
						 robust_head.list);	      \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head;	      \
    /* Ensure that the new list entry is ready before we insert it.  */      \
    __asm ("" ::: "memory");						      \
    THREAD_SETMEM (THREAD_SELF, robust_head.list,			      \
		   (void *) (((uintptr_t) &mutex->__data.__list.__next)      \
			     | val));					      \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do {									      \
    __pthread_list_t *next = (__pthread_list_t *)			      \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul)	      \
       - QUEUE_PTR_ADJUST);						      \
    next->__prev = mutex->__data.__list.__prev;				      \
    __pthread_list_t *prev = (__pthread_list_t *)			      \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul)	      \
       - QUEUE_PTR_ADJUST);						      \
    prev->__next = mutex->__data.__list.__next;				      \
    /* Ensure that we remove the entry from the list before we change the    \
       __next pointer of the entry, which is read by the kernel.  */	      \
    __asm ("" ::: "memory");						      \
    mutex->__data.__list.__prev = NULL;					      \
    mutex->__data.__list.__next = NULL;					      \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val)					      \
  do {									      \
    mutex->__data.__list.__next						      \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next);		      \
    /* Ensure that the new list entry is ready before we insert it.  */      \
    __asm ("" ::: "memory");						      \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next,			      \
		   (void *) (((uintptr_t) &mutex->__data.__list) | val));    \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do {									      \
    __pthread_slist_t *runp = (__pthread_slist_t *)			      \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list)					      \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next);	      \
    else								      \
      {									      \
	__pthread_slist_t *next = (__pthread_slist_t *)			      \
	  (((uintptr_t) runp->__next) & ~1ul);				      \
	while (next != &mutex->__data.__list)				      \
	  {								      \
	    runp = next;						      \
	    next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
	  }								      \
									      \
	runp->__next = next->__next;					      \
	/* Ensure that we remove the entry from the list before we change the \
	   __next pointer of the entry, which is read by the kernel.  */     \
	__asm ("" ::: "memory");					      \
	mutex->__data.__list.__next = NULL;				      \
      }									      \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)

  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT		0
#define CANCELSTATE_BITMASK	(1 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT		1
#define CANCELTYPE_BITMASK	(1 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT		2
#define CANCELING_BITMASK	(1 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT		3
#define CANCELED_BITMASK	(1 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT		4
#define EXITING_BITMASK		(1 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT		5
#define TERMINATED_BITMASK	(1 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT		6
#define SETXID_BITMASK		(1 << SETXID_BIT)

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* Indicates that thread creation setup has failed (for instance
     setting the scheduler or affinity).  */
  int setup_failed;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))

  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If nonzero, pointer to the area allocated for the stack and guard.  */
  void *stackblock;
  /* Size of the stackblock area including the guard.  */
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* Signal mask for the new thread.  Used during thread startup to
     restore the signal mask.  (Threads are launched with all signals
     masked.)  */
  internal_sigset_t sigmask;

  /* Indicates whether this is a C11 thread created by thrd_create.  */
  bool c11;

  /* Used in __pthread_kill_internal to detect a thread that has
     exited or is about to exit.  exit_lock must only be acquired
     after blocking signals.  */
  bool exiting;
  int exit_lock; /* A low-level lock (for use with __libc_lock_init etc).  */

  /* Used by strsignal.  */
  struct tls_internal_t tls_state;

  /* rseq area registered with the kernel.  */
  struct rseq rseq_area;

  /* This member must be last.  */
  char end_padding[];

#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, end_padding))
} __attribute ((aligned (TCB_ALIGNMENT)));

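#if __PTHREAD_MUTEX_HAVE_PREV
/* Illustrative sketch (hypothetical helper, see the robust-mutex list
   comment inside struct pthread above): recovering the enclosing
   __pthread_list_t from a robust_head.list pointer.  Such pointers
   address the __next member and may carry the PI flag in bit 0, so the
   flag is masked off and QUEUE_PTR_ADJUST subtracted, just as the
   ENQUEUE_MUTEX_BOTH and DEQUEUE_MUTEX macros do.  */
static inline __pthread_list_t *
__example_robust_list_entry (void *list_pointer)
{
  return (__pthread_list_t *) (((uintptr_t) list_pointer & ~1ul)
			       - QUEUE_PTR_ADJUST);
}
#endif
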
static inline bool
cancel_enabled_and_canceled (int value)
{
  return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
		   | TERMINATED_BITMASK))
    == CANCELED_BITMASK;
}

static inline bool
cancel_enabled_and_canceled_and_async (int value)
{
  return ((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK
		     | EXITING_BITMASK | TERMINATED_BITMASK))
    == (CANCELTYPE_BITMASK | CANCELED_BITMASK);
}
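
/* Illustrative sketch (hypothetical helper, not used by the real
   cancellation points): how the predicates above are applied to the
   cancelhandling member.  For example, a value in which only
   CANCELED_BIT is set satisfies cancel_enabled_and_canceled, and once
   CANCELTYPE_BIT is also set the asynchronous variant is satisfied as
   well.  The real code reads cancelhandling with atomic operations; a
   plain load is used here only to keep the sketch short.  */
static inline bool
__example_cancel_pending (struct pthread *pd)
{
  return cancel_enabled_and_canceled (pd->cancelhandling);
}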

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif

#endif	/* descr.h */