1 /* Low-level lock implementation.  High-level Hurd helpers.
2    Copyright (C) 1999-2022 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4 
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9 
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14 
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; if not, see
17    <https://www.gnu.org/licenses/>.  */
18 
19 #ifndef _HURD_LOCK_H
20 #define _HURD_LOCK_H   1
21 
22 #include <mach/lowlevellock.h>
23 
24 struct timespec;
25 
/* Flags for robust locks.  These occupy the two top bits of the
   lock word; the remaining bits (LLL_OWNER_MASK) identify the
   owner.  */
#define LLL_WAITERS      (1U << 31)   /* Other threads are blocked waiting.  */
#define LLL_DEAD_OWNER   (1U << 30)   /* The owner of the lock has died.  */

/* Mask that extracts the owner field from a robust lock's value.
   Fully parenthesized so the expansion binds correctly in any
   expression context (e.g. 'sizeof LLL_OWNER_MASK').  */
#define LLL_OWNER_MASK   (~(LLL_WAITERS | LLL_DEAD_OWNER))
31 
/* Wait on 64-bit address PTR, without blocking if its contents
   are different from the pair <LO, HI>.  Every parameter is
   parenthesized in the expansion so that compound arguments are
   handled correctly: without this, '(vm_offset_t)p + i' would cast
   only P, and a FLAGS argument such as 'a ? b : c' would bind
   wrongly against the '|' below.  */
#define __lll_xwait(ptr, lo, hi, flags) \
  __gsync_wait (__mach_task_self (), \
    (vm_offset_t)(ptr), (lo), (hi), 0, (flags) | GSYNC_QUAD)
37 
/* Same as '__lll_wait', but only block for MLSEC milliseconds.
   Parameters are parenthesized so that compound PTR and FLAGS
   expressions expand correctly (cast and '|' precedence).  */
#define __lll_timed_wait(ptr, val, mlsec, flags) \
  __gsync_wait (__mach_task_self (), \
    (vm_offset_t)(ptr), (val), 0, (mlsec), (flags) | GSYNC_TIMED)
42 
/* Interruptible version of '__lll_timed_wait': may return early
   (e.g. on signal delivery) instead of waiting out the timeout.
   Parameters are parenthesized so compound arguments expand
   correctly.  */
#define __lll_timed_wait_intr(ptr, val, mlsec, flags) \
  __gsync_wait_intr (__mach_task_self (), \
    (vm_offset_t)(ptr), (val), 0, (mlsec), (flags) | GSYNC_TIMED)
47 
/* Same as '__lll_xwait', but only block for MLSEC milliseconds.
   Parameters are parenthesized so that compound PTR and FLAGS
   expressions expand correctly (cast and '|' precedence).  */
#define __lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
  __gsync_wait (__mach_task_self (), (vm_offset_t)(ptr), \
    (lo), (hi), (mlsec), (flags) | GSYNC_TIMED | GSYNC_QUAD)
52 
/* Same as '__lll_wait', but only block until the absolute timeout
   TSP elapses, measured against clock CLK.  */
extern int __lll_abstimed_wait (void *__ptr, int __val,
  const struct timespec *__tsp, int __flags, int __clk);
57 
/* Interruptible version of '__lll_abstimed_wait': may return
   early (e.g. on signal delivery) instead of waiting until TSP.  */
extern int __lll_abstimed_wait_intr (void *__ptr, int __val,
  const struct timespec *__tsp, int __flags, int __clk);
61 
/* Same as '__lll_xwait', but only block until the absolute
   timeout TSP elapses, measured against clock CLK.  */
extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi,
  const struct timespec *__tsp, int __flags, int __clk);
66 
/* Same as 'lll_lock', but return with an error if the absolute
   timeout TSP elapses before the lock is acquired, measured
   against clock CLK.  */
extern int __lll_abstimed_lock (void *__ptr,
  const struct timespec *__tsp, int __flags, int __clk);
71 
/* Acquire the lock at PTR, but return with an error if
   the process containing the owner thread dies.  */
extern int __lll_robust_lock (void *__ptr, int __flags);
/* Convenience wrapper: acquire the robust lock stored in the
   lvalue VAR.  */
#define lll_robust_lock(var, flags) \
  __lll_robust_lock (&(var), flags)
77 
/* Same as '__lll_robust_lock', but only block until the absolute
   timeout TSP elapses, measured against clock CLK.  */
extern int __lll_robust_abstimed_lock (void *__ptr,
  const struct timespec *__tsp, int __flags, int __clk);
82 
/* Same as '__lll_robust_lock', but return with an error
   if the lock cannot be acquired without blocking.  */
extern int __lll_robust_trylock (void *__ptr);
/* Convenience wrapper: try the robust lock stored in the
   lvalue VAR.  */
#define lll_robust_trylock(var) \
  __lll_robust_trylock (&(var))
88 
/* Wake one or more threads waiting on address PTR, setting its
   value to VAL before doing so (GSYNC_MUTATE).  Parameters are
   parenthesized so that compound PTR and FLAGS expressions
   expand correctly (cast and '|' precedence).  */
#define __lll_set_wake(ptr, val, flags) \
  __gsync_wake (__mach_task_self (), \
    (vm_offset_t)(ptr), (val), (flags) | GSYNC_MUTATE)
94 
/* Release the robust lock at PTR.  */
extern void __lll_robust_unlock (void *__ptr, int __flags);
/* Convenience wrapper: release the robust lock stored in the
   lvalue VAR.  */
#define lll_robust_unlock(var, flags) \
  __lll_robust_unlock (&(var), flags)
99 
/* Rearrange threads waiting on address SRC to instead wait on
   DST, waking one of them if WAKE_ONE is non-zero.  Parameters
   are parenthesized so that compound SRC/DST/WAKE_ONE expressions
   are cast as a whole rather than only their first operand.  */
#define __lll_requeue(src, dst, wake_one, flags) \
  __gsync_requeue (__mach_task_self (), (vm_offset_t)(src), \
    (vm_offset_t)(dst), (boolean_t)(wake_one), (flags))
105 
106 /* The following are hacks that allow us to simulate optional
107    parameters in C, to avoid having to pass the clock id for
108    every one of these calls, defaulting to CLOCK_REALTIME if
109    no argument is passed.  */
110 
/* Wait on the lvalue VAR as '__lll_abstimed_wait'.  An optional
   trailing argument selects the clock; with no extra argument,
   CLOCK_REALTIME is used.  The ', ##__VA_ARGS__' GNU extension
   drops the comma when no clock is given, so the last element of
   __clk is always the effective clock id.  */
#define lll_abstimed_wait(var, val, tsp, flags, ...)   \
  ({   \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
     __lll_abstimed_wait (&(var), (val), (tsp), (flags),   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
   })
117 
/* Interruptible wait on the lvalue VAR as
   '__lll_abstimed_wait_intr'.  An optional trailing argument
   selects the clock; with none, CLOCK_REALTIME is used (the last
   element of __clk is the effective clock id).  */
#define lll_abstimed_wait_intr(var, val, tsp, flags, ...)   \
  ({   \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
     __lll_abstimed_wait_intr (&(var), (val), (tsp), (flags),   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
   })
124 
/* 64-bit wait on the lvalue VAR as '__lll_abstimed_xwait'.  An
   optional trailing argument selects the clock; with none,
   CLOCK_REALTIME is used (the last element of __clk is the
   effective clock id).  */
#define lll_abstimed_xwait(var, lo, hi, tsp, flags, ...)   \
  ({   \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
     __lll_abstimed_xwait (&(var), (lo), (hi), (tsp), (flags),   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
   })
131 
/* Timed acquire of the lock in the lvalue VAR as
   '__lll_abstimed_lock'.  An optional trailing argument selects
   the clock; with none, CLOCK_REALTIME is used (the last element
   of __clk is the effective clock id).  */
#define lll_abstimed_lock(var, tsp, flags, ...)   \
  ({   \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
     __lll_abstimed_lock (&(var), (tsp), (flags),   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
   })
138 
/* Timed acquire of the robust lock in the lvalue VAR as
   '__lll_robust_abstimed_lock'.  An optional trailing argument
   selects the clock; with none, CLOCK_REALTIME is used (the last
   element of __clk is the effective clock id).  */
#define lll_robust_abstimed_lock(var, tsp, flags, ...)   \
  ({   \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
     __lll_robust_abstimed_lock (&(var), (tsp), (flags),   \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
   })
145 
146 
147 #endif
148