#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/types.h>
#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")
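/*
 * Illustrative sketch (not part of this header's API): ia64_barrier() only
 * constrains the compiler and emits no instruction.  A typical pattern is
 * to keep surrounding accesses from being reordered at compile time:
 *
 *	ready = 1;
 *	ia64_barrier();		// compiler may not move accesses across this
 *	poke_hardware();	// hypothetical helper, for illustration only
 *
 * For ordering visible to other CPUs or devices, ia64_mf() further down is
 * the memory fence; the compiler barrier alone is not enough.
 */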

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif

#define ia64_native_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	    case _IA64_REG_PSR_L:						\
		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
		    break;							\
	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		    asm volatile ("mov ar%0=%1" ::				\
					  "i" (regnum - _IA64_REG_AR_KR0),	\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		    asm volatile ("mov cr%0=%1" ::				\
					  "i" (regnum - _IA64_REG_CR_DCR),	\
					  "r"(val): "memory" );			\
		    break;							\
	    case _IA64_REG_SP:							\
		    asm volatile ("mov r12=%0" ::				\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_GP:							\
		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		    break;							\
	    default:								\
		    ia64_bad_param_for_setreg();				\
		    break;							\
	}									\
})
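/*
 * Usage sketch (illustrative only; the _IA64_REG_* indices are defined
 * elsewhere in the ia64 headers): the switch is resolved at compile time,
 * so regnum must be a compile-time constant, e.g.
 *
 *	ia64_native_setreg(_IA64_REG_AR_KR0, val);	// expands to mov ar0=...
 *	ia64_native_setreg(_IA64_REG_SP, new_sp);	// expands to mov r12=...
 *
 * An unsupported or non-constant regnum reaches ia64_bad_param_for_setreg(),
 * which is intentionally left undefined so the misuse fails at link time.
 */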

#define ia64_native_getreg(regnum)						\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
				      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
				      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
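/*
 * Illustrative sketch: reading the memory stack pointer (r12) and the
 * thread pointer (r13, which current() is built on):
 *
 *	unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
 *	unsigned long tp = ia64_native_getreg(_IA64_REG_TP);
 *
 * As with ia64_native_setreg() above, regnum must be a compile-time
 * constant so only one of the asm statements survives.
 */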

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})


/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
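/*
 * Example (a sketch): mux1 permutes the eight bytes of a register and the
 * mode constant selects the permutation.  Broadcasting the low byte across
 * all eight byte positions looks like:
 *
 *	__u64 pattern = ia64_mux1(0xabUL, ia64_mux1_brcst);
 *	// pattern == 0xabababababababab
 *
 * The mode must be one of the constants above so the switch collapses to a
 * single mux1 instruction at compile time.
 */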

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)		__builtin_popcountl(x)
#else
# define ia64_popcnt(x)						\
  ({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
								\
	ia64_intri_res;						\
  })
#endif
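/*
 * Sketch: either form returns the number of set bits in a 64-bit value,
 * e.g. ia64_popcnt(0xf0f0UL) == 8.  Sufficiently new gcc versions emit the
 * popcnt instruction directly from __builtin_popcountl().
 */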

#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
								\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
								\
	ia64_intri_res;						\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
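/*
 * Sketch: shrp concatenates a (high half) and b (low half) into a 128-bit
 * value and shifts it right by a constant count, which also gives a cheap
 * 64-bit rotate when both operands are the same value:
 *
 *	__u64 ror8 = ia64_shrp(x, x, 8);	// rotate x right by 8 bits
 *
 * count is an "i" operand, so it must be a compile-time constant.
 */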

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_st4_rel_nta(m, val)					\
({									\
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)						\
({										\
	register double __f__ asm ("f"#regnum);					\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
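/*
 * Sketch: these macros name a fixed floating-point register by number
 * (regnum is pasted into the asm), so they are used to spill and refill
 * specific FP registers, e.g. saving f32 and restoring it later:
 *
 *	ia64_stf_spill(save_area, 32);	// stf.spill [save_area]=f32
 *	...
 *	ia64_ldf_fill(32, save_area);	// ldf.fill f32=[save_area]
 *
 * (Illustrative only; real callers pass addresses of suitably aligned
 * 16-byte save slots.)
 */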

#define ia64_fetchadd4_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd4_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})
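/*
 * Sketch: fetchadd atomically adds a constant to a naturally aligned word
 * and returns the *old* value; the architecture only accepts the increments
 * -16, -8, -4, -1, 1, 4, 8 and 16.  A minimal atomic counter bump
 * (illustrative, not the kernel's atomic_t implementation):
 *
 *	__u64 old = ia64_fetchadd4_acq(&counter, 1);
 */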

#define ia64_xchg1(ptr,x)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("xchg1 %0=[%1],%2"					\
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
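/*
 * Sketch: xchg unconditionally swaps the new value into memory and returns
 * the previous contents, e.g. a simple test-and-set style lock grab
 * (illustrative only):
 *
 *	while (ia64_xchg4(&lock_word, 1) != 0)
 *		cpu_relax();	// assumed helper; spin until we stored the 1
 */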

#define ia64_cmpxchg1_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg1_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})
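/*
 * Sketch: each cmpxchg macro first loads the expected old value into
 * ar.ccv and then issues the cmpxchg, which stores new only if memory
 * still equals ar.ccv; the old memory contents are returned either way.
 * The usual retry loop (illustrative):
 *
 *	do {
 *		old = *p;
 *		new = old + 1;
 *	} while (ia64_cmpxchg8_acq(p, new, old) != old);
 */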

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala() asm volatile ("invala" ::: "memory")

#define ia64_native_thash(addr)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)								\
({										\
	unsigned long ia64_pa;							\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})

#define __ia64_set_dbr(index, val)						\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)						\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)						\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)						\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)						\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_get_cpuid(index)							\
({											\
	unsigned long ia64_intri_res;							\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;									\
})

#define __ia64_get_dbr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})


#define ia64_native_get_pmd(index)						\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_native_get_rr(index)						\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})

#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")


#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size)						\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr)							\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;							\
})


/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3

#define ia64_lfetch(lfhint, y)					\
({								\
	switch (lfhint) {					\
	case ia64_lfhint_none:					\
		asm volatile ("lfetch [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt1:					\
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt2:					\
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nta:					\
		asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
		break;						\
	}							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})
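/*
 * Sketch: the lfetch variants differ only in their completers -- .excl
 * hints an intent to write, .fault allows the prefetch to take faults --
 * while the lfhint constants above pick the temporal locality hint, e.g.
 *
 *	ia64_lfetch(ia64_lfhint_nta, p);	// prefetch *p, non-temporal
 *
 * As with the other switch-based macros, the hint must be a compile-time
 * constant so only a single lfetch instruction survives.
 */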

#define ia64_native_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
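/*
 * Sketch: the restore macro re-enables interrupts (ssm psr.i) when the
 * saved flags value is non-zero and masks them (rsm psr.i) when it is
 * zero, matching a save done by reading psr.  Illustrative only -- the
 * kernel wraps this in local_irq_save()/local_irq_restore(), and
 * IA64_PSR_I (the psr.i mask bit) is assumed to come from the ia64
 * register-definition headers:
 *
 *	unsigned long flags;
 *	flags = ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I;
 *	ia64_native_rsm(IA64_PSR_I);		// disable interrupts
 *	...
 *	ia64_native_intrin_local_irq_restore(flags);
 */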

#endif /* _ASM_IA64_GCC_INTRIN_H */