/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H	1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed, i.e., when only one thread has access
     to the memory location; in the multi-threaded case the code
     is slower.  The interfaces have the prefix "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few low-level macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side effects are evaluated properly, use a
   per-macro unique prefix for macro-local variables.  This file uses
   the __atgN_ prefix, where N is different in each macro.  */

#include <stdlib.h>

#include <atomic-machine.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...)			      \
  ({									      \
    __typeof ((__typeof (*(mem))) *(mem)) __atg1_result;		      \
    if (sizeof (*mem) == 1)						      \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 2)					      \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 4)					      \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 8)					      \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__);		      \
    else								      \
      abort ();								      \
    __atg1_result;							      \
  })
#define __atomic_bool_bysize(pre, post, mem, ...)			      \
  ({									      \
    int __atg2_result;							      \
    if (sizeof (*mem) == 1)						      \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 2)					      \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 4)					      \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__);		      \
    else if (sizeof (*mem) == 8)					      \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__);		      \
    else								      \
      abort ();								      \
    __atg2_result;							      \
  })

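/* For illustration only (not part of the interface): with an 'int *mem',
   so sizeof (*mem) == 4, a use such as

     __atomic_val_bysize (__arch_compare_and_exchange_val, acq,
			  mem, newval, oldval)

   dispatches on the size and ends up calling
   __arch_compare_and_exchange_val_32_acq (mem, newval, oldval),
   whose result becomes the value of the whole expression.  */
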
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq,		      \
		       mem, newval, oldval)
#endif


#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq,		      \
		       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif


#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval)	      \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval)	      \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval)	      \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq,		      \
		        mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might     \
	call this macro with __oldval argument.	 */			      \
     __typeof (oldval) __atg3_old = (oldval);				      \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old)	      \
       != __atg3_old;							      \
  })
# endif
#endif


#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq,		      \
		        mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might     \
	call this macro with __oldval argument.	 */			      \
     __typeof (oldval) __atg4_old = (oldval);				      \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old)	      \
       != __atg4_old;							      \
  })
# endif
#endif

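/* Usage sketch for the compare-and-exchange macros (illustrative only;
   'mem' is a hypothetical 'int *'):

     int prev = atomic_compare_and_exchange_val_acq (mem, 1, 0);
     // prev holds the old *mem; the store of 1 happened only if prev == 0.

     if (atomic_compare_and_exchange_bool_acq (mem, 1, 0) == 0)
       // A zero result means *mem was 0 and has been set to 1.
       ...;
*/
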
/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof ((__typeof (*(mem))) *(mem)) __atg5_oldval;		      \
     __typeof (mem) __atg5_memp = (mem);				      \
     __typeof ((__typeof (*(mem))) *(mem)) __atg5_value = (newvalue);	      \
									      \
     do									      \
       __atg5_oldval = *__atg5_memp;					      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
						   __atg5_oldval), 0));	      \
									      \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif

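/* Usage sketch (illustrative only; 'flag' is hypothetical):

     int old = atomic_exchange_acq (&flag, 1);
     // old is the previous value of flag; flag is now 1 regardless.
*/
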
/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval;					      \
     __typeof (mem) __atg6_memp = (mem);				      \
     __typeof (*(mem)) __atg6_value = (value);				      \
									      \
     do									      \
       __atg6_oldval = *__atg6_memp;					      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg6_memp,		      \
						   __atg6_oldval	      \
						   + __atg6_value,	      \
						   __atg6_oldval), 0));	      \
									      \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq(mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv;					      \
     __typeof (mem) __atg7_memp = (mem);				      \
     __typeof (*(mem)) __atg7_value = (value);				      \
									      \
     do									      \
       __atg7_oldv = *__atg7_memp;					      \
     while (__builtin_expect						      \
	    (catomic_compare_and_exchange_bool_acq (__atg7_memp,	      \
						    __atg7_oldv		      \
						    + __atg7_value,	      \
						    __atg7_oldv), 0));	      \
									      \
     __atg7_oldv; })
#endif

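/* Usage sketch (illustrative only; 'counter' is hypothetical):

     unsigned int old = atomic_exchange_and_add (&counter, 1);
     // old is the value counter had before the addition; the new value
     // can be computed as old + 1 without re-reading counter.
*/
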
#ifndef atomic_max
# define atomic_max(mem, value) \
  do {									      \
    __typeof (*(mem)) __atg8_oldval;					      \
    __typeof (mem) __atg8_memp = (mem);					      \
    __typeof (*(mem)) __atg8_value = (value);				      \
    do {								      \
      __atg8_oldval = *__atg8_memp;					      \
      if (__atg8_oldval >= __atg8_value)				      \
	break;								      \
    } while (__builtin_expect						      \
	     (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value,\
						    __atg8_oldval), 0));      \
  } while (0)
#endif


#ifndef catomic_max
# define catomic_max(mem, value) \
  do {									      \
    __typeof (*(mem)) __atg9_oldv;					      \
    __typeof (mem) __atg9_memp = (mem);					      \
    __typeof (*(mem)) __atg9_value = (value);				      \
    do {								      \
      __atg9_oldv = *__atg9_memp;					      \
      if (__atg9_oldv >= __atg9_value)					      \
	break;								      \
    } while (__builtin_expect						      \
	     (catomic_compare_and_exchange_bool_acq (__atg9_memp,	      \
						     __atg9_value,	      \
						     __atg9_oldv), 0));	      \
  } while (0)
#endif


#ifndef atomic_min
# define atomic_min(mem, value) \
  do {									      \
    __typeof (*(mem)) __atg10_oldval;					      \
    __typeof (mem) __atg10_memp = (mem);				      \
    __typeof (*(mem)) __atg10_value = (value);				      \
    do {								      \
      __atg10_oldval = *__atg10_memp;					      \
      if (__atg10_oldval <= __atg10_value)				      \
	break;								      \
    } while (__builtin_expect						      \
	     (atomic_compare_and_exchange_bool_acq (__atg10_memp,	      \
						    __atg10_value,	      \
						    __atg10_oldval), 0));     \
  } while (0)
#endif

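/* Usage sketch (illustrative only; 'peak' and 'current' are hypothetical):

     atomic_max (&peak, current);
     // peak is only updated if current is larger; concurrent updates
     // retry via the CAS loop above.
*/
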
#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif


#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif


#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif


#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif


#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif


#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif


/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif


#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif


#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif


#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif


#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif


/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif

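/* Usage sketch for reference counting (illustrative only; 'obj' and
   free_object are hypothetical):

     atomic_increment (&obj->refcount);
     ...
     if (atomic_decrement_and_test (&obj->refcount))
       free_object (obj);   // we dropped the last reference
*/
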
/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval;					      \
     __typeof (mem) __atg11_memp = (mem);				      \
									      \
     do									      \
       {								      \
	 __atg11_oldval = *__atg11_memp;				      \
	 if (__glibc_unlikely (__atg11_oldval <= 0))			      \
	   break;							      \
       }								      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg11_memp,	      \
						   __atg11_oldval - 1,	      \
						   __atg11_oldval), 0));      \
     __atg11_oldval; })
#endif


#ifndef atomic_add_negative
# define atomic_add_negative(mem, value)				      \
  ({ __typeof (value) __atg12_value = (value);				      \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif


#ifndef atomic_add_zero
# define atomic_add_zero(mem, value)					      \
  ({ __typeof (value) __atg13_value = (value);				      \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif

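/* Usage sketch (illustrative only; 'sem' is a hypothetical counter of
   available tokens):

     if (atomic_decrement_if_positive (&sem) > 0)
       // A token was available and has been consumed.
       ...;
     else
       // No token was available; sem was left unchanged.
       ...;
*/
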
#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set(mem, bit)
#endif


#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old;					      \
     __typeof (mem) __atg14_memp = (mem);				      \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit));	      \
									      \
     do									      \
       __atg14_old = (*__atg14_memp);					      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg14_memp,	      \
						   __atg14_old | __atg14_mask,\
						   __atg14_old), 0));	      \
									      \
     __atg14_old & __atg14_mask; })
#endif

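/* Usage sketch (illustrative only; 'word' and LOCK_BIT are hypothetical):

     if (atomic_bit_test_set (&word, LOCK_BIT) == 0)
       // The bit was previously clear, so this thread set it and owns
       // the lock; a non-zero result means it was already set.
       ...;
*/
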
/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do {									      \
    __typeof (*(mem)) __atg15_old;					      \
    __typeof (mem) __atg15_memp = (mem);				      \
    __typeof (*(mem)) __atg15_mask = (mask);				      \
									      \
    do									      \
      __atg15_old = (*__atg15_memp);					      \
    while (__builtin_expect						      \
	   (atomic_compare_and_exchange_bool_acq (__atg15_memp,		      \
						  __atg15_old & __atg15_mask, \
						  __atg15_old), 0));	      \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do {									      \
    __typeof (*(mem)) __atg20_old;					      \
    __typeof (mem) __atg20_memp = (mem);				      \
    __typeof (*(mem)) __atg20_mask = (mask);				      \
									      \
    do									      \
      __atg20_old = (*__atg20_memp);					      \
    while (__builtin_expect						      \
	   (catomic_compare_and_exchange_bool_acq (__atg20_memp,	      \
						   __atg20_old & __atg20_mask,\
						   __atg20_old), 0));	      \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old;					      \
     __typeof (mem) __atg16_memp = (mem);				      \
     __typeof (*(mem)) __atg16_mask = (mask);				      \
									      \
     do									      \
       __atg16_old = (*__atg16_memp);					      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg16_memp,	      \
						   __atg16_old & __atg16_mask,\
						   __atg16_old), 0));	      \
									      \
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do {									      \
    __typeof (*(mem)) __atg17_old;					      \
    __typeof (mem) __atg17_memp = (mem);				      \
    __typeof (*(mem)) __atg17_mask = (mask);				      \
									      \
    do									      \
      __atg17_old = (*__atg17_memp);					      \
    while (__builtin_expect						      \
	   (atomic_compare_and_exchange_bool_acq (__atg17_memp,		      \
						  __atg17_old | __atg17_mask, \
						  __atg17_old), 0));	      \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do {									      \
    __typeof (*(mem)) __atg18_old;					      \
    __typeof (mem) __atg18_memp = (mem);				      \
    __typeof (*(mem)) __atg18_mask = (mask);				      \
									      \
    do									      \
      __atg18_old = (*__atg18_memp);					      \
    while (__builtin_expect						      \
	   (catomic_compare_and_exchange_bool_acq (__atg18_memp,	      \
						   __atg18_old | __atg18_mask,\
						   __atg18_old), 0));	      \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old;					      \
     __typeof (mem) __atg19_memp = (mem);				      \
     __typeof (*(mem)) __atg19_mask = (mask);				      \
									      \
     do									      \
       __atg19_old = (*__atg19_memp);					      \
     while (__builtin_expect						      \
	    (atomic_compare_and_exchange_bool_acq (__atg19_memp,	      \
						   __atg19_old | __atg19_mask,\
						   __atg19_old), 0));	      \
									      \
     __atg19_old; })
#endif

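/* Usage sketch (illustrative only; 'flags' and the FLAG_* bits are
   hypothetical):

     int old = atomic_or_val (&flags, FLAG_READY);
     if ((old & FLAG_READY) == 0)
       // This thread was the one that set FLAG_READY.
       ...;
     atomic_and (&flags, ~FLAG_BUSY);   // clear FLAG_BUSY atomically
*/
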
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif


#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

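/* Usage sketch (illustrative only; 'shared' and LIMIT are hypothetical):
   force a single load so that later tests all use the same snapshot:

     int val = atomic_forced_read (shared);
     // Both tests below use the same loaded value; the compiler cannot
     // re-read 'shared' in between.
     if (val > 0 && val < LIMIT)
       ...;
*/
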
/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
#error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */

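/* For example (illustrative only), atomic_store_release (mem, val)
   corresponds to C11's
   atomic_store_explicit (mem, val, memory_order_release), and
   atomic_fetch_add_relaxed (mem, n) corresponds to
   atomic_fetch_add_explicit (mem, n, memory_order_relaxed).  */
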
/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8))			      \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4)						      \
     __atomic_link_error ();
# endif
/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
   need other atomic operations of such sizes, and restricting the support to
   loads and stores makes this easier for archs that do not have native
   support for atomic operations on less-than-word-sized data.  */
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)   \
       && (sizeof (*mem) != 8))						      \
     __atomic_link_error ();
# else
#  define __atomic_check_size_ls(mem) \
   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4)    \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size_ls((mem));					      \
     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size_ls((mem));					      \
     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do {									      \
    __atomic_check_size_ls((mem));					      \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED);			      \
  } while (0)
# define atomic_store_release(mem, val) \
  do {									      \
    __atomic_check_size_ls((mem));					      \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE);			      \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_compare_exchange_n ((mem), (expected), (desired), 1,		      \
    __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_compare_exchange_n ((mem), (expected), (desired), 1,		      \
    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_compare_exchange_n ((mem), (expected), (desired), 1,		      \
    __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

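/* Sketch of the usual retry loop for the weak CAS (illustrative only;
   'counter' is hypothetical).  On failure, *expected is updated to the
   value currently stored, so the loop simply recomputes and retries:

     int expected = atomic_load_relaxed (&counter);
     while (!atomic_compare_exchange_weak_acquire (&counter, &expected,
						   expected + 1))
       ;   // expected now holds the value observed by the failed CAS
*/
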
# define atomic_exchange_relaxed(mem, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); })
# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem));					      \
  __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_relaxed(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_and_release(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_or_release(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); })

# define atomic_fetch_xor_release(mem, operand) \
  ({ __atomic_check_size((mem));					      \
  __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); })

#else /* !USE_ATOMIC_COMPILER_BUILTINS  */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof ((__typeof (*(mem))) *(mem)) __atg100_val;		      \
   __asm ("" : "=r" (__atg100_val) : "0" (*(mem)));			      \
   __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem);	      \
   atomic_thread_fence_acquire ();					      \
   __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do {									      \
     atomic_thread_fence_release ();					      \
     atomic_store_relaxed ((mem), (val));				      \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected);		      \
   *(expected) =							      \
     atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected));     \
   *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected);		      \
   *(expected) =							      \
     atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected));     \
   *(expected) == __atg103_expected; })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_exchange.  */
# ifndef atomic_exchange_relaxed
#  define atomic_exchange_relaxed(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release ();					      \
   atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_and_val.  */
# ifndef atomic_fetch_and_relaxed
#  define atomic_fetch_and_relaxed(mem, operand) \
   atomic_fetch_and_acquire ((mem), (operand))
# endif
/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif
# ifndef atomic_fetch_and_release
/* XXX This unnecessarily has acquire MO.  */
#  define atomic_fetch_and_release(mem, operand) \
   ({ atomic_thread_fence_release ();					      \
   atomic_and_val ((mem), (operand)); })
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif
/* XXX Contains an unnecessary acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_release
#  define atomic_fetch_or_release(mem, operand) \
   ({ atomic_thread_fence_release ();					      \
   atomic_fetch_or_acquire ((mem), (operand)); })
# endif

# ifndef atomic_fetch_xor_release
/* A failed atomic_compare_exchange_weak_release reloads the current value
   into __atg104_expected, so we need only do the XOR again and retry.  */
# define atomic_fetch_xor_release(mem, operand) \
  ({ __typeof (mem) __atg104_memp = (mem);				      \
     __typeof (*(mem)) __atg104_expected = (*__atg104_memp);		      \
     __typeof (*(mem)) __atg104_desired;				      \
     __typeof (*(mem)) __atg104_op = (operand);				      \
									      \
     do									      \
       __atg104_desired = __atg104_expected ^ __atg104_op;		      \
     while (__glibc_unlikely						      \
	    (atomic_compare_exchange_weak_release (			      \
	       __atg104_memp, &__atg104_expected, __atg104_desired)	      \
	     == 0));							      \
     __atg104_expected; })
#endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS  */

/* This operation does not affect synchronization semantics but can be used
   in the body of a spin loop to potentially improve its efficiency.  */
#ifndef atomic_spin_nop
# define atomic_spin_nop() do { /* nothing */ } while (0)
#endif

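/* Usage sketch (illustrative only; 'lock' is hypothetical):

     while (atomic_load_relaxed (&lock) != 0)
       atomic_spin_nop ();   // e.g. a PAUSE-like hint on some archs
*/
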
/* ATOMIC_EXCHANGE_USES_CAS is non-zero if atomic_exchange operations
   are implemented based on a CAS loop; otherwise, this is zero and we assume
   that the atomic_exchange operations could provide better performance
   than a CAS loop.  */
#ifndef ATOMIC_EXCHANGE_USES_CAS
# error ATOMIC_EXCHANGE_USES_CAS has to be defined.
#endif

#endif	/* atomic.h */