/* Completion of TCB initialization after TLS_INIT_TP.  NPTL version.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <kernel-features.h>
#include <ldsodefs.h>
#include <list.h>
#include <pthreadP.h>
#include <tls.h>
#include <rseq-internal.h>
#include <thread_pointer.h>

#define TUNABLE_NAMESPACE pthread
#include <dl-tunables.h>

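/* True if the set_robust_list system call is available; determined in
   __tls_init_tp below.  */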
#ifndef __ASSUME_SET_ROBUST_LIST
bool __nptl_set_robust_list_avail;
rtld_hidden_data_def (__nptl_set_robust_list_avail)
#endif

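/* Set by the debugger to request event reporting before the initial
   thread has been added to GL (dl_stack_user); propagated into the
   TCB in __tls_init_tp below.  */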
bool __nptl_initial_report_events;
rtld_hidden_def (__nptl_initial_report_events)

#ifdef SHARED
/* Dummy implementation.  See __rtld_mutex_init.  */
static int
rtld_mutex_dummy (pthread_mutex_t *lock)
{
  return 0;
}
#endif

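/* Exported rseq ABI variables.  __rseq_size and __rseq_offset are in
   .data.relro and are assigned below through writable aliases, before
   RELRO protection is applied.  */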
const unsigned int __rseq_flags;
const unsigned int __rseq_size attribute_relro;
const ptrdiff_t __rseq_offset attribute_relro;

void
__tls_pre_init_tp (void)
{
  /* The list data structures are not consistent until
     initialized.  */
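  /* dl_stack_used and dl_stack_user track the stacks of running
     threads (NPTL-allocated and user-supplied, respectively);
     dl_stack_cache holds free stacks available for reuse.  */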
  INIT_LIST_HEAD (&GL (dl_stack_used));
  INIT_LIST_HEAD (&GL (dl_stack_user));
  INIT_LIST_HEAD (&GL (dl_stack_cache));

#ifdef SHARED
  ___rtld_mutex_lock = rtld_mutex_dummy;
  ___rtld_mutex_unlock = rtld_mutex_dummy;
#endif
}

void
__tls_init_tp (void)
{
  struct pthread *pd = THREAD_SELF;

  /* Set up thread stack list management.  */
  list_add (&pd->list, &GL (dl_stack_user));

  /* Early initialization of the TCB.  */
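  /* set_tid_address records &pd->tid as the clear_child_tid address
     and returns the TID of the calling thread.  */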
  pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);

  /* Before initializing GL (dl_stack_user), the debugger could not
     find us and had to set __nptl_initial_report_events.  Propagate
     its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
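    /* futex_offset is the offset the kernel adds to a robust list
       entry (the mutex's __next pointer) to locate the futex word of
       that mutex.  */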
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                     sizeof (struct robust_list_head));
    if (!INTERNAL_SYSCALL_ERROR_P (res))
      {
#ifndef __ASSUME_SET_ROBUST_LIST
        __nptl_set_robust_list_avail = true;
#endif
      }
  }

  {
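    /* rseq registration can be disabled through the glibc.pthread.rseq
       tunable.  */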
    bool do_rseq = true;
#if HAVE_TUNABLES
    do_rseq = TUNABLE_GET (rseq, int, NULL);
#endif
    if (rseq_register_current_thread (pd, do_rseq))
      {
        /* We need a writable view of the variables.  They are in
           .data.relro and are not yet write-protected.  */
        extern unsigned int size __asm__ ("__rseq_size");
        size = sizeof (pd->rseq_area);
      }

#ifdef RSEQ_SIG
    /* This should be a compile-time constant, but the current
       infrastructure makes it difficult to determine its value.  Not
       all targets support __thread_pointer, so set __rseq_offset only
       if the rseq registration may have happened because RSEQ_SIG is
       defined.  */
    extern ptrdiff_t offset __asm__ ("__rseq_offset");
    offset = (char *) &pd->rseq_area - (char *) __thread_pointer ();
#endif
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
}