/* Monotonically increasing wide counters (at least 62 bits).
   Copyright (C) 2016-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_WIDE_COUNTER_H
#define _ATOMIC_WIDE_COUNTER_H

#include <atomic.h>
#include <bits/atomic_wide_counter.h>

#if __HAVE_64B_ATOMICS
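
/* With native 64-bit atomics, the counter is simply the 64-bit
   __value64 member, and the operations below are thin wrappers around
   the corresponding 64-bit atomic operations.  */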

static inline uint64_t
__atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
{
  return atomic_load_relaxed (&c->__value64);
}

static inline uint64_t
__atomic_wide_counter_load_acquire (__atomic_wide_counter *c)
{
  return atomic_load_acquire (&c->__value64);
}

static inline uint64_t
__atomic_wide_counter_fetch_add_relaxed (__atomic_wide_counter *c,
                                         unsigned int val)
{
  return atomic_fetch_add_relaxed (&c->__value64, val);
}

static inline uint64_t
__atomic_wide_counter_fetch_add_acquire (__atomic_wide_counter *c,
                                         unsigned int val)
{
  return atomic_fetch_add_acquire (&c->__value64, val);
}
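
/* Note: this is a relaxed load followed by a separate relaxed store,
   not an atomic read-modify-write, so a concurrent increment can be
   lost; callers are presumably serialized externally when they use
   this variant.  */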
static inline void
__atomic_wide_counter_add_relaxed (__atomic_wide_counter *c,
                                   unsigned int val)
{
  atomic_store_relaxed (&c->__value64,
                        atomic_load_relaxed (&c->__value64) + val);
}
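
/* No 32-bit fallback exists for the XOR operation, so it is only
   provided in this branch; __attribute__ ((unused)) suppresses
   unused-function warnings in translation units that include this
   header but never call it.  */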
static uint64_t __attribute__ ((unused))
__atomic_wide_counter_fetch_xor_release (__atomic_wide_counter *c,
                                         unsigned int val)
{
  return atomic_fetch_xor_release (&c->__value64, val);
}

#else /* !__HAVE_64B_ATOMICS */
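
/* Without 64-bit atomics, the counter is stored as two 32-bit halves
   (see <bits/atomic_wide_counter.h>).  The relaxed load and
   fetch-and-add are implemented out of line (declared here with
   attribute_hidden and defined elsewhere in libc), and the acquire
   variants are derived from them by issuing an acquire fence after
   the relaxed operation.  */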

uint64_t __atomic_wide_counter_load_relaxed (__atomic_wide_counter *c)
  attribute_hidden;

static inline uint64_t
__atomic_wide_counter_load_acquire (__atomic_wide_counter *c)
{
  uint64_t r = __atomic_wide_counter_load_relaxed (c);
  atomic_thread_fence_acquire ();
  return r;
}

uint64_t __atomic_wide_counter_fetch_add_relaxed (__atomic_wide_counter *c,
                                                  unsigned int op)
  attribute_hidden;

static inline uint64_t
__atomic_wide_counter_fetch_add_acquire (__atomic_wide_counter *c,
                                         unsigned int val)
{
  uint64_t r = __atomic_wide_counter_fetch_add_relaxed (c, val);
  atomic_thread_fence_acquire ();
  return r;
}

static inline void
__atomic_wide_counter_add_relaxed (__atomic_wide_counter *c,
                                   unsigned int val)
{
  __atomic_wide_counter_fetch_add_relaxed (c, val);
}

#endif /* !__HAVE_64B_ATOMICS */

#endif /* _ATOMIC_WIDE_COUNTER_H */
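
/* Usage sketch (illustrative only; the function names below are
   hypothetical).  Within glibc this type backs, for example, the
   condition-variable sequence counters:

     static __atomic_wide_counter counter;   // zero-initialized

     void note_event (void)
     {
       __atomic_wide_counter_fetch_add_relaxed (&counter, 1);
     }

     uint64_t events_so_far (void)
     {
       return __atomic_wide_counter_load_relaxed (&counter);
     }
*/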