#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H

/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

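/*
 * The fetchadd instruction encodes its increment in a 3-bit immediate
 * field, so only the eight values -16, -8, -4, -1, 1, 4, 8, and 16 are
 * representable.  IA64_FETCHADD() therefore requires "n" to be a
 * compile-time constant from that set.
 */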
#define IA64_FETCHADD(tmp,v,n,sz)						\
({										\
	switch (sz) {								\
	      case 4:								\
		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
										\
	      case 8:								\
		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
	}									\
})

#define ia64_fetch_and_add(i,v)							\
({										\
	__u64 _tmp;								\
	volatile __typeof__(*(v)) *_v = (v);					\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
	if ((i) == -16)								\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));			\
	else if ((i) == -8)							\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));			\
	else if ((i) == -4)							\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));			\
	else if ((i) == -1)							\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));			\
	else if ((i) == 1)							\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));			\
	else if ((i) == 4)							\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));			\
	else if ((i) == 8)							\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));			\
	else if ((i) == 16)							\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));			\
	else									\
		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
})
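
/*
 * Usage sketch (illustrative only; "nwaiters" is a hypothetical counter):
 *
 *	static volatile __u64 nwaiters;
 *	...
 *	__u64 n = ia64_fetch_and_add(1, &nwaiters);
 *
 * Note that the macro returns the NEW value of *v, not the old value
 * that the fetchadd instruction itself produces.
 */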

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer (void);

static __inline__ unsigned long
__xchg (unsigned long x, volatile void *ptr, int size)
{
	unsigned long result;

	switch (size) {
	      case 1:
		__asm__ __volatile__ ("xchg1 %0=[%1],%2" : "=r" (result)
				      : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 2:
		__asm__ __volatile__ ("xchg2 %0=[%1],%2" : "=r" (result)
				      : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 4:
		__asm__ __volatile__ ("xchg4 %0=[%1],%2" : "=r" (result)
				      : "r" (ptr), "r" (x) : "memory");
		return result;

	      case 8:
		__asm__ __volatile__ ("xchg8 %0=[%1],%2" : "=r" (result)
				      : "r" (ptr), "r" (x) : "memory");
		return result;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
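
/*
 * Usage sketch (illustrative only; "lock_word" is a hypothetical
 * variable):
 *
 *	unsigned long lock_word = 0;
 *	...
 *	unsigned long was_set = xchg(&lock_word, 1);
 *
 * The swap is atomic and, on IA-64, xchg always has acquire semantics:
 * the old contents of *ptr are returned and x is stored in their place.
 */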

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long __cmpxchg_called_with_bad_pointer (void);

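/*
 * The cmpxchg instruction compares against the contents of the ar.ccv
 * application register, so the "old" value must be moved there before
 * the cmpxchg itself executes; that's what the "mov ar.ccv=%0" below
 * is for.
 */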
#define ia64_cmpxchg(sem,ptr,old,new,size)					\
({										\
	__typeof__(ptr) _p_ = (ptr);						\
	__typeof__(new) _n_ = (new);						\
	__u64 _o_, _r_;								\
										\
	switch (size) {								\
	      case 1: _o_ = (__u8 ) (long) (old); break;			\
	      case 2: _o_ = (__u16) (long) (old); break;			\
	      case 4: _o_ = (__u32) (long) (old); break;			\
	      case 8: _o_ = (__u64) (long) (old); break;			\
	      default: break;							\
	}									\
	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));			\
	switch (size) {								\
	      case 1:								\
		__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"	\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
		break;								\
										\
	      case 2:								\
		__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv"	\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
		break;								\
										\
	      case 4:								\
		__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv"	\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
		break;								\
										\
	      case 8:								\
		__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv"	\
				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
		break;								\
										\
	      default:								\
		_r_ = __cmpxchg_called_with_bad_pointer();			\
		break;								\
	}									\
	(__typeof__(old)) _r_;							\
})

#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
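
/*
 * Usage sketch (illustrative only; "flag" is a hypothetical variable):
 *
 *	__u32 flag = 0, old;
 *	...
 *	old = cmpxchg(&flag, 0, 1);
 *	if (old == 0)
 *		... the store happened and we own the flag ...
 *
 * As described above, success is detected by comparing the returned
 * value with the "old" argument.
 */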

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)						\
  do {									\
	if (_cmpxchg_bugcheck_count-- <= 0) {				\
		void *ip;						\
		extern int printk(const char *fmt, ...);		\
		asm ("mov %0=ip" : "=r"(ip));				\
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
		break;							\
	}								\
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
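
/*
 * CMPXCHG_BUGCHECK() is meant to be used inside a cmpxchg retry loop,
 * roughly like this (a sketch; "v" is a hypothetical __u64 pointer):
 *
 *	__u64 old, new;
 *	CMPXCHG_BUGCHECK_DECL
 *
 *	do {
 *		CMPXCHG_BUGCHECK(v);
 *		old = *v;
 *		new = old + 1;
 *	} while (cmpxchg_acq(v, old, new) != old);
 *
 * If the loop spins too long, the debug version prints the instruction
 * pointer and the address being operated on.
 */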

#endif /* _ASM_IA64_INTRINSICS_H */