#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */


#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
/*
 * TASK_SIZE really is misnamed.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
#define TASK_SIZE       	TASK_SIZE_OF(current)
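
/*
 * Illustrative sketch (not part of this header's API): the top three
 * bits of a virtual address select its region, so DEFAULT_TASK_SIZE
 * (0xa000000000000000) is the start of region 5, leaving regions 0-4
 * to user space.
 */
#if 0	/* example only */
static inline unsigned long va_region(unsigned long addr)
{
	return addr >> 61;	/* bits 63:61 are the region number */
}
/* va_region(DEFAULT_TASK_SIZE - 1) == 4, i.e. the last user region */
#endif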

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to represent 1000000000/itc_freq with good
 * accuracy while being small enough that 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT
 * still fits in 64 bits (this gives enough slack to represent 10 seconds' worth
 * of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
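
/*
 * A minimal sketch of how nsec_per_cyc is meant to be consumed: scale
 * an ITC cycle count, then shift the scale factor back out.  The helper
 * below is illustrative only; "cycles" stands for an ITC reading.
 */
#if 0	/* example only */
static inline unsigned long cycles_to_ns(unsigned long cycles, unsigned long nsec_per_cyc)
{
	/* nsec_per_cyc = (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq */
	return (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
#endif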

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* the processor status register (PSR), like the IA64_PSR_* bit masks in
   <asm/kregs.h> but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64  val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64  rv  : 16;
		__u64  eid : 8;
		__u64  id  : 8;
		__u64  ig  : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64  ve	:  1;  /* enable hw walker */
		__u64  reserved0:  1;  /* reserved */
		__u64  ps	:  6;  /* log page size */
		__u64  rid	: 24;  /* region id */
		__u64  reserved1: 32;  /* reserved */
	};
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer match value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;	/* physical processor socket id */
	unsigned short core_id;	/* core id */
	unsigned short thread_id; /* thread id */
	unsigned short num_log;	/* Total number of logical processors on
				 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;
#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
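
/*
 * Sketch of how the UAC bits round-trip through the two macros above
 * (the PR_UNALIGN_* constants live in <linux/prctl.h>, not here):
 *
 *	SET_UNALIGN_CTL(task, PR_UNALIGN_SIGBUS);
 *		PR_UNALIGN_SIGBUS (2) << IA64_THREAD_UAC_SHIFT (3)
 *		sets IA64_THREAD_UAC_SIGBUS (bit 4) in thread.flags.
 *	GET_UNALIGN_CTL(task, &val);
 *		shifts the masked bits back down, so val == 2 again.
 *
 * The FPEMU pair works the same way, two bit positions higher.
 */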

struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,     \
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {				\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })
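
/*
 * Note: cr_iip is the 16-byte-aligned bundle address and psr.ri is the
 * restart-instruction slot (0-2) within that bundle, so the sum above
 * encodes the slot in the low bits rather than forming a byte address.
 */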

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r) 					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
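
/*
 * Usage sketch: regnum must be a compile-time constant, so each
 * expansion above folds down to a single move to/from the named ar.k
 * register.  For example, the fph-ownership macros below use the
 * IA64_KR_FPU_OWNER register (see <asm/kregs.h>) like this:
 */
#if 0	/* example only */
	struct task_struct *owner;

	owner = (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) current);
#endif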

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
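
/*
 * A minimal sketch of the lazy-fph protocol built from the three macros
 * above (preemption disabled; the real logic lives in the disabled-fp
 * fault handler, this is only an illustration):
 */
#if 0	/* example only */
	if (!ia64_is_local_fpu_owner(current)) {
		if (current->thread.flags & IA64_THREAD_FPH_VALID)
			ia64_load_fpu(current->thread.fph);	/* reload f32-f127 */
		else
			ia64_init_fpu();	/* give the task a clean fph */
		ia64_set_local_fpu_owner(current);
	}
#endif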

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 to FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}
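
/*
 * The two functions above are typically used as a bracket around
 * operations that must run with interruption collection off, e.g.
 * inserting a translation register (a sketch; tr_num/vaddr/pte/
 * log_page_size are placeholders for the caller's values):
 */
#if 0	/* example only */
	__u64 psr = ia64_clear_ic();		/* psr.i and psr.ic off */
	ia64_itr(0x1, tr_num, vaddr, pte, log_page_size);
	ia64_set_psr(psr);			/* restore the old psr */
	ia64_srlz_i();
#endif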

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
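/* target_mask: bit 0 operates on the instruction side, bit 1 on the data
   side (same convention in ia64_itc() and ia64_ptr() below). */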
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

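	/* external interrupt vectors are 0-255, so reg is always 0..3
	   and irr is guaranteed to be written by the switch below */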
	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}


/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
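
/*
 * Worked example: a register spilled at an address ending in 0x0b8 maps
 * to bit (0x0b8 >> 3) & 0x3f == 23, so ia64_set_unat() clears or sets
 * bit 23 of *unat according to NAT.
 */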

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
			 IDLE_NOMWAIT, IDLE_POLL};

void cpu_idle_wait(void);
void default_idle(void);

#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */