/* Copyright (C) 2003-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1

/* Compare *MEM against OLDVAL and, if they are equal, store NEWVAL,
   using the 68020 CAS instruction (CAS2 on the two halves for the
   64-bit case).  Return the value found in *MEM.  */
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.b %0,%2,%1" \
                       : "=d" (__ret), "+m" (*(mem)) \
                       : "d" (newval), "0" (oldval)); \
     __ret; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.w %0,%2,%1" \
                       : "=d" (__ret), "+m" (*(mem)) \
                       : "d" (newval), "0" (oldval)); \
     __ret; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.l %0,%2,%1" \
                       : "=d" (__ret), "+m" (*(mem)) \
                       : "d" (newval), "0" (oldval)); \
     __ret; })

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __typeof (mem) __memp = (mem); \
     __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
                       : "=d" (__ret) \
                       : "d" ((__typeof (*(mem))) (newval)), "r" (__memp), \
                         "r" ((char *) __memp + 4), "0" (oldval) \
                       : "memory"); \
     __ret; })

/* Atomically store NEWVALUE in *MEM and return the old value,
   retrying the CAS until it succeeds.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __result = *(mem); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("1: cas%.b %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)) \
                         : "d" (newvalue), "0" (__result)); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("1: cas%.w %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)) \
                         : "d" (newvalue), "0" (__result)); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("1: cas%.l %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)) \
                         : "d" (newvalue), "0" (__result)); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
                           " jbne 1b" \
                           : "=d" (__result) \
                           : "d" ((__typeof (*(mem))) (newvalue)), \
                             "r" (__memp), "r" ((char *) __memp + 4), \
                             "0" (__result) \
                           : "memory"); \
       } \
     __result; })

/* Atomically add VALUE to *MEM and return the old value.  */
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result = *(mem); \
     __typeof (*(mem)) __temp; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("1: move%.b %0,%2;" \
                         " add%.b %3,%2;" \
                         " cas%.b %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)), \
                           "=&d" (__temp) \
                         : "d" (value), "0" (__result)); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("1: move%.w %0,%2;" \
                         " add%.w %3,%2;" \
                         " cas%.w %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)), \
                           "=&d" (__temp) \
                         : "d" (value), "0" (__result)); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("1: move%.l %0,%2;" \
                         " add%.l %3,%2;" \
                         " cas%.l %0,%2,%1;" \
                         " jbne 1b" \
                         : "=d" (__result), "+m" (*(mem)), \
                           "=&d" (__temp) \
                         : "d" (value), "0" (__result)); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __asm __volatile ("1: move%.l %0,%1;" \
                           " move%.l %R0,%R1;" \
                           " add%.l %R2,%R1;" \
                           " addx%.l %2,%1;" \
                           " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
                           " jbne 1b" \
                           : "=d" (__result), "=&d" (__temp) \
                           : "d" ((__typeof (*(mem))) (value)), "r" (__memp), \
                             "r" ((char *) __memp + 4), "0" (__result) \
                           : "memory"); \
       } \
     __result; })

/* Atomically add VALUE to *MEM, discarding the result.  */
#define atomic_add(mem, value) \
  (void) ({ if (sizeof (*(mem)) == 1) \
              __asm __volatile ("add%.b %1,%0" \
                                : "+m" (*(mem)) \
                                : "id" (value)); \
            else if (sizeof (*(mem)) == 2) \
              __asm __volatile ("add%.w %1,%0" \
                                : "+m" (*(mem)) \
                                : "id" (value)); \
            else if (sizeof (*(mem)) == 4) \
              __asm __volatile ("add%.l %1,%0" \
                                : "+m" (*(mem)) \
                                : "id" (value)); \
            else \
              { \
                __typeof (mem) __memp = (mem); \
                __typeof (*(mem)) __oldval = *__memp; \
                __typeof (*(mem)) __temp; \
                __asm __volatile ("1: move%.l %0,%1;" \
                                  " move%.l %R0,%R1;" \
                                  " add%.l %R2,%R1;" \
                                  " addx%.l %2,%1;" \
                                  " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
                                  " jbne 1b" \
                                  : "=d" (__oldval), "=&d" (__temp) \
                                  : "d" ((__typeof (*(mem))) (value)), \
                                    "r" (__memp), "r" ((char *) __memp + 4), \
                                    "0" (__oldval) \
                                  : "memory"); \
              } \
            })

/* Atomically increment *MEM and return nonzero if the result is zero.  */
#define atomic_increment_and_test(mem) \
  ({ char __result; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("addq%.b %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("addq%.w %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("addq%.l %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __typeof (*(mem)) __oldval = *__memp; \
         __typeof (*(mem)) __temp; \
         __asm __volatile ("1: move%.l %1,%2;" \
                           " move%.l %R1,%R2;" \
                           " addq%.l %#1,%R2;" \
                           " addx%.l %5,%2;" \
                           " seq %0;" \
                           " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
                           " jbne 1b" \
                           : "=&dm" (__result), "=d" (__oldval), \
                             "=&d" (__temp) \
                           : "r" (__memp), "r" ((char *) __memp + 4), \
                             "d" (0), "1" (__oldval) \
                           : "memory"); \
       } \
     __result; })

/* Atomically decrement *MEM and return nonzero if the result is zero.  */
#define atomic_decrement_and_test(mem) \
  ({ char __result; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("subq%.b %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("subq%.w %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("subq%.l %#1,%1; seq %0" \
                         : "=dm" (__result), "+m" (*(mem))); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __typeof (*(mem)) __oldval = *__memp; \
         __typeof (*(mem)) __temp; \
         __asm __volatile ("1: move%.l %1,%2;" \
                           " move%.l %R1,%R2;" \
                           " subq%.l %#1,%R2;" \
                           " subx%.l %5,%2;" \
                           " seq %0;" \
                           " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
                           " jbne 1b" \
                           : "=&dm" (__result), "=d" (__oldval), \
                             "=&d" (__temp) \
                           : "r" (__memp), "r" ((char *) __memp + 4), \
                             "d" (0), "1" (__oldval) \
                           : "memory"); \
       } \
     __result; })

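/* Illustrative sketch, not part of the original header: the CAS retry
   loop that the inline-assembly sequences above implement, written in
   plain C on top of the 32-bit compare-and-exchange macro.  The
   function name and the int operands are assumptions made only for
   this example, and the "#if 0" keeps it from affecting the header.  */
#if 0
static int
example_fetch_and_add (int *counter, int value)
{
  int oldval, seen;
  do
    {
      /* Read the current value, then try to install OLDVAL + VALUE;
         the macro returns the value actually found in *COUNTER.  */
      oldval = *counter;
      seen = __arch_compare_and_exchange_val_32_acq (counter,
                                                     oldval + value, oldval);
    }
  while (seen != oldval);
  /* Like atomic_exchange_and_add, yield the value before the addition.  */
  return oldval;
}
#endif
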
%0{%1,#1}" \ 224 : "+m" (*(mem)) \ 225 : "di" (sizeof (*(mem)) * 8 - (bit) - 1)) 226 227 #define atomic_bit_test_set(mem, bit) \ 228 ({ char __result; \ 229 __asm __volatile ("bfset %1{%2,#1}; sne %0" \ 230 : "=dm" (__result), "+m" (*(mem)) \ 231 : "di" (sizeof (*(mem)) * 8 - (bit) - 1)); \ 232 __result; }) 233