/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint to the hardware to expect additional updates adjacent
   to the lock word or not.  If we are acquiring a Mutex, the hint
   should be true.  Otherwise we are releasing a Mutex or doing a simple
   atomic operation.  In that case we don't expect additional updates
   adjacent to the lock word after the Store Conditional and the hint
   should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
/* Hint operand appended to lwarx: ",1" = acquiring (expect adjacent
   updates), ",0" = releasing / plain atomic op.  */
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
/* Pre-POWER6 processors do not accept the hint operand, so these
   expand to nothing and the plain lwarx form is emitted.  */
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif

/* PowerPC32 capability flags consumed by the generic atomic machinery:
   no 64-bit (doubleword) atomics, no compiler __atomic builtins, and
   atomic exchange is implemented via the lwarx/stwcx. CAS loop.  */
#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1

/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
 * (a load word and zero (high 32) form).  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/atomic-machine.h.
 */
/* Compare-and-exchange, boolean result, acquire semantics.
   Evaluates to 0 on success (stored NEWVAL into *MEM) and nonzero when
   *MEM did not equal OLDVAL.  The subf. computes *MEM - OLDVAL and sets
   cr0; on mismatch we branch past the store with __tmp nonzero.  If the
   stwcx. loses the reservation, bne- retries from the lwarx.
   __ARCH_ACQ_INSTR (the acquire barrier — supplied by the common header
   pulled in by the include_next below) orders later accesses after the
   exchange.  cr0 is clobbered by the dot-form instructions.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm __volatile ( \
    "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
    " subf. %0,%2,%0\n" \
    " bne 2f\n" \
    " stwcx. %3,0,%1\n" \
    " bne- 1b\n" \
    "2: " __ARCH_ACQ_INSTR \
    : "=&r" (__tmp) \
    : "b" (mem), "r" (oldval), "r" (newval) \
    : "cr0", "memory"); \
  __tmp != 0; \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  These stubs abort at
   runtime if ever reached; the expressions after abort() only give each
   macro a well-typed value so callers type-check.  */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

#define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

#define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

#define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })

#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync)
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# define __ARCH_REL_INSTR "lwsync"
# define atomic_write_barrier() __asm ("lwsync" ::: "memory")
#else
/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier() __asm ("sync" ::: "memory")
# define atomic_write_barrier() __asm ("sync" ::: "memory")
#endif

/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <atomic-machine.h>