/* Hurd helpers for lowlevellocks.
   Copyright (C) 1999-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>

/* Convert an absolute timeout on clock CLK to a relative
   timeout in milliseconds.  */
static inline int __attribute__ ((gnu_inline))
compute_reltime (const struct timespec *abstime, clockid_t clk)
{
  struct timespec ts;
  __clock_gettime (clk, &ts);

  ts.tv_sec = abstime->tv_sec - ts.tv_sec;
  ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;

  if (ts.tv_nsec < 0)
    {
      --ts.tv_sec;
      ts.tv_nsec += 1000000000;
    }

  return ts.tv_sec < 0 ? -1 : (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

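/* Block on *PTR while it still contains VAL, giving up once the absolute
   timeout TSP on clock CLK has expired.  Only CLOCK_REALTIME is supported;
   a deadline that has already passed is reported as KERN_TIMEDOUT without
   sleeping.  */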
int
__lll_abstimed_wait (void *ptr, int val,
  const struct timespec *tsp, int flags, int clk)
{
  if (clk != CLOCK_REALTIME)
    return EINVAL;

  int mlsec = compute_reltime (tsp, clk);
  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
}

int
__lll_abstimed_wait_intr (void *ptr, int val,
  const struct timespec *tsp, int flags, int clk)
{
  if (clk != CLOCK_REALTIME)
    return EINVAL;

  int mlsec = compute_reltime (tsp, clk);
  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait_intr (ptr, val, mlsec, flags);
}

int
__lll_abstimed_xwait (void *ptr, int lo, int hi,
  const struct timespec *tsp, int flags, int clk)
{
  if (clk != CLOCK_REALTIME)
    return EINVAL;

  int mlsec = compute_reltime (tsp, clk);
  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_xwait (ptr, lo, hi, mlsec,
                                                        flags);
}

int
__lll_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  if (clk != CLOCK_REALTIME)
    return EINVAL;

  if (__lll_trylock (ptr) == 0)
    return 0;

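  /* Contended case: mark the lock word as locked-with-waiters (2) and
     sleep until it is released or the deadline passes.  */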
  while (1)
    {
      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
        return 0;
      else if (! valid_nanoseconds (tsp->tv_nsec))
        return EINVAL;

      int mlsec = compute_reltime (tsp, clk);
      if (mlsec < 0 || __lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
        return ETIMEDOUT;
    }
}

/* Robust locks.  */

/* Test if a given process id is still valid.  */
static inline int
valid_pid (int pid)
{
  task_t task = __pid2task (pid);
  if (task == MACH_PORT_NULL)
    return 0;

  __mach_port_deallocate (__mach_task_self (), task);
  return 1;
}

/* Robust locks have currently no support from the kernel; they
   are simply implemented with periodic polling.  When sleeping, the
   maximum blocking time is determined by this constant.  */
#define MAX_WAIT_TIME   1500
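/* The word of a robust lock holds the owner's PID, possibly with
   LLL_WAITERS set while other threads are blocked on it; LLL_OWNER_MASK
   extracts the PID so that a dead owner can be detected with
   valid_pid.  */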

int
__lll_robust_lock (void *ptr, int flags)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  /* Try to set the lock word to our PID if it's clear.  Otherwise,
     mark it as having waiters.  */
  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (atomic_compare_and_exchange_bool_acq (iptr,
               val | LLL_WAITERS, val) == 0)
        break;
    }

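  /* The waiters bit is now set.  Keep trying to take the lock, adopting it
     with EOWNERDEAD if the recorded owner no longer exists, and otherwise
     sleeping with exponential back-off capped at MAX_WAIT_TIME
     milliseconds.  */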
  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return EOWNERDEAD;
        }
      else
        {
          __lll_timed_wait (iptr, val, wait_time, flags);
          if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}

int
__lll_robust_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  if (clk != CLOCK_REALTIME)
    return EINVAL;

  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (atomic_compare_and_exchange_bool_acq (iptr,
          val | LLL_WAITERS, val) == 0)
        break;
    }

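  /* Same acquisition loop as __lll_robust_lock, except that each poll
     sleeps for at most the time remaining before the deadline, and
     ETIMEDOUT is returned once the deadline has passed.  */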
  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return EOWNERDEAD;
        }
      else
        {
          int mlsec = compute_reltime (tsp, clk);
          if (mlsec < 0)
            return ETIMEDOUT;
          else if (mlsec > wait_time)
            mlsec = wait_time;

          int res = __lll_timed_wait (iptr, val, mlsec, flags);
          if (res == KERN_TIMEDOUT)
            return ETIMEDOUT;
          else if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}

int
__lll_robust_trylock (void *ptr)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  unsigned int val = *iptr;

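  /* Take the lock if it is free, or take it over and report EOWNERDEAD if
     the recorded owner has died; otherwise fail with EBUSY.  */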
  if (!val)
    {
      if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return 0;
    }
  else if (!valid_pid (val & LLL_OWNER_MASK)
           && atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
    return EOWNERDEAD;

  return EBUSY;
}

void
__lll_robust_unlock (void *ptr, int flags)
{
  unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
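  /* If waiters may be blocked on the lock, clear the word and wake them via
     __lll_set_wake; otherwise just clear it with a CAS.  A failed CAS
     reloads VAL, so a concurrently set LLL_WAITERS bit is seen on the next
     iteration.  */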
  while (1)
    {
      if (val & LLL_WAITERS)
        {
          __lll_set_wake (ptr, 0, flags);
          break;
        }
      else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
        break;
    }
}