/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
#include <atomic.h>
#include <pthread-offsets.h>
#include <futex-internal.h>
#include <shlib-compat.h>

#include <stap-probe.h>

static const struct pthread_mutexattr default_mutexattr =
  {
    /* Default is a normal mutex, not shared between processes.  */
    .mutexkind = PTHREAD_MUTEX_NORMAL
  };


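/* Check once whether the kernel supports priority-inheritance futexes;
   the probe result is cached so the syscall is issued at most once.  */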
static bool
prio_inherit_missing (void)
{
  static int tpi_supported;
  if (__glibc_unlikely (atomic_load_relaxed (&tpi_supported) == 0))
    {
      int e = futex_unlock_pi (&(unsigned int){0}, 0);
      atomic_store_relaxed (&tpi_supported, e == ENOSYS ? -1 : 1);
    }
  return __glibc_unlikely (tpi_supported < 0);
}

int
___pthread_mutex_init (pthread_mutex_t *mutex,
		       const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;

  ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);

  /* __kind is the only field whose offset needs to be checked, to
     avoid ABI breakage with static initializers.  */
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
				  __PTHREAD_MUTEX_KIND_OFFSET);
  ASSERT_PTHREAD_INTERNAL_MEMBER_SIZE (pthread_mutex_t, __data.__kind, int);

  imutexattr = ((const struct pthread_mutexattr *) mutexattr
		?: &default_mutexattr);

  /* Sanity checks.  */
  switch (__builtin_expect (imutexattr->mutexkind
			    & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
			    PTHREAD_PRIO_NONE
			    << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      if (__glibc_unlikely (prio_inherit_missing ()))
	return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
	return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

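  /* Robust mutexes are encoded as a distinct kind; process-shared
     robust mutexes are rejected if set_robust_list support is known
     to be missing.  */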
  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
	  && !__nptl_set_robust_list_avail)
	return ENOTSUP;
#endif

      mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

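  /* Encode the requested protocol in the mutex kind; for
     PTHREAD_PRIO_PROTECT the initial priority ceiling is also stored
     in the lock word.  */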
  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

      int ceiling = (imutexattr->mutexkind
		     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
		    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
	{
	  /* See __init_sched_fifo_prio.  */
	  if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
	    __init_sched_fifo_prio ();
	  if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
	    ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
	}
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* When waking robust mutexes on thread exit, the kernel never uses
     FUTEX_PRIVATE_FLAG with FUTEX_WAKE.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
				| PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;	already done by memset
  // mutex->__owner = 0;	already done by memset
  // mutex->__nusers = 0;	already done by memset
  // mutex->__spins = 0;	already done by memset
  // mutex->__next = NULL;	already done by memset

  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
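/* Export the public pthread_mutex_init symbol at the baseline version;
   __pthread_mutex_init is kept as an internal alias and as a
   compatibility symbol for binaries linked against older libpthread.  */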
versioned_symbol (libpthread, ___pthread_mutex_init, pthread_mutex_init,
		  GLIBC_2_0);
libc_hidden_ver (___pthread_mutex_init, __pthread_mutex_init)
#ifndef SHARED
strong_alias (___pthread_mutex_init, __pthread_mutex_init)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_init, __pthread_mutex_init,
	       GLIBC_2_0);
#endif