/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>

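/* __pthread_mutex_clocklock_common locks MUTEX but gives up once the
   absolute deadline ABSTIME, measured against CLOCKID, has passed.  All
   public entry points below funnel into it.  Illustrative caller-side
   sketch, with handle_timeout as a hypothetical placeholder:

     struct timespec deadline;
     clock_gettime (CLOCK_MONOTONIC, &deadline);
     deadline.tv_sec += 2;
     if (pthread_mutex_clocklock (&m, CLOCK_MONOTONIC, &deadline)
         == ETIMEDOUT)
       handle_timeout ();  */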
int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

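          /* Fold the spin count just observed into the per-mutex estimate
             as an exponential moving average:
             __spins += (observed - __spins) / 8.  */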
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
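      /* The barrier keeps the op_pending store ordered before the lock CAS
         below: should this thread die between the CAS and ENQUEUE_MUTEX,
         the kernel's robust-list walk still finds the mutex via op_pending
         and can mark it FUTEX_OWNER_DIED.  */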

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       id | assume_other_futex_waiters,
                                                       0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                           | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We could not acquire the mutex and its owner has not died.
             Thus, try to block using futexes.  Set FUTEX_WAITERS if
             necessary so that other threads are aware that there are
             potentially threads blocked on the futex.  Restart if oldval
             changed in the meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
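      /* A caller that receives EOWNERDEAD owns the lock but must repair
         the protected state and call pthread_mutex_consistent before
         unlocking.  Illustrative sketch, with repair_state as a
         hypothetical placeholder:

           int err = pthread_mutex_clocklock (&m, clockid, &deadline);
           if (err == EOWNERDEAD)
             {
               repair_state ();
               pthread_mutex_consistent (&m);
             }  */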

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything: it blocks us with the absolute timeout and
               applies priority inheritance to the lock owner.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, clockid, abstime,
                                       private);
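            /* On success the kernel has stored our TID in the lock word,
               setting FUTEX_OWNER_DIED as well if the previous owner died;
               failures come back as positive error codes.  */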
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
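            /* The kernel set FUTEX_OWNER_DIED in the lock word when it
               handed us a lock whose owner exited.  Clear the flag: this
               thread takes over recovery and reports EOWNERDEAD below.  */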
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
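        /* The lock word of a PP mutex encodes the current priority ceiling
           in PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock state in the low
           bits: ceilval means unlocked, ceilval | 1 locked, and
           ceilval | 2 locked with waiters.  */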
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;
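            /* __pthread_tpp_change_priority records the new ceiling in the
               thread's priority-protection state and, if needed, raises the
               thread's scheduling priority to the ceiling.  */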

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}

int
___pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                              clockid_t clockid,
                              const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}

#if __TIMESIZE == 64
strong_alias (___pthread_mutex_clocklock64, ___pthread_mutex_clocklock)
#else /* __TIMESIZE != 64 */
strong_alias (___pthread_mutex_clocklock64, __pthread_mutex_clocklock64)
libc_hidden_def (__pthread_mutex_clocklock64)

int
___pthread_mutex_clocklock (pthread_mutex_t *mutex,
                            clockid_t clockid,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return ___pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
#endif /* __TIMESIZE != 64 */
libc_hidden_ver (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#ifndef SHARED
strong_alias (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#endif
versioned_symbol (libc, ___pthread_mutex_clocklock,
                  pthread_mutex_clocklock, GLIBC_2_34);
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_30, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_clocklock,
               pthread_mutex_clocklock, GLIBC_2_30);
#endif
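/* pthread_mutex_clocklock first appeared in libpthread in glibc 2.30; the
   GLIBC_2_34 version above reflects its move into libc proper, while the
   compat symbol keeps binaries linked against the older libpthread
   working.  */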

int
___pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                              const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}

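/* pthread_mutex_timedlock measures ABSTIME against CLOCK_REALTIME.
   Illustrative caller-side sketch of the public entry point, with
   handle_timeout as a hypothetical placeholder:

     struct timespec deadline;
     clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_sec += 2;
     if (pthread_mutex_timedlock (&m, &deadline) == ETIMEDOUT)
       handle_timeout ();  */
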
#if __TIMESIZE == 64
strong_alias (___pthread_mutex_timedlock64, ___pthread_mutex_timedlock)
#else /* __TIMESIZE != 64 */
strong_alias (___pthread_mutex_timedlock64, __pthread_mutex_timedlock64)
libc_hidden_def (__pthread_mutex_timedlock64)

int
___pthread_mutex_timedlock (pthread_mutex_t *mutex,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
#endif /* __TIMESIZE != 64 */
versioned_symbol (libc, ___pthread_mutex_timedlock,
                  pthread_mutex_timedlock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#ifndef SHARED
strong_alias (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_timedlock,
               pthread_mutex_timedlock, GLIBC_2_2);
#endif