/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources to tools/:
 *
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */

/* "mf" is the ia64 memory-fence instruction; mb() and friends expand to it. */
#define ia64_mf()	asm volatile ("mf" ::: "memory")

#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})

#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
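
/*
 * Illustrative usage sketch (not part of the upstream header): a minimal
 * producer/consumer pairing of smp_store_release()/smp_load_acquire().
 * The names below ("payload", "ready", producer(), consumer()) are
 * hypothetical and exist only for this example; it assumes the tools/
 * <linux/compiler.h> providing READ_ONCE()/WRITE_ONCE()/barrier() is on
 * the include path.
 */
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;			/* plain store, ordered by the release below */
	smp_store_release(&ready, 1);	/* publish: payload is visible before ready == 1 */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* acquire: if ready == 1, payload is visible */
		return payload;
	return -1;			/* not published yet */
}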