1 /* $Id: processor.h,v 1.80.2.1 2002/02/02 02:11:52 kanoj Exp $
2  * include/asm-sparc64/processor.h
3  *
4  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5  */
6 
7 #ifndef __ASM_SPARC64_PROCESSOR_H
8 #define __ASM_SPARC64_PROCESSOR_H
9 
/*
 * Sparc64 implementation of macro that returns current
 * instruction pointer ("program counter"), read directly from
 * the %pc register via a GCC statement expression.
 */
#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
15 
16 #include <linux/config.h>
17 #include <asm/asi.h>
18 #include <asm/a.out.h>
19 #include <asm/pstate.h>
20 #include <asm/ptrace.h>
21 #include <asm/signal.h>
22 #include <asm/segment.h>
23 #include <asm/page.h>
24 #include <asm/delay.h>
25 
/* Bus types: sparc64 systems have neither EISA nor MCA, so these
 * are compile-time zero constants.
 */
#define EISA_bus 0
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

/* The sparc has no problems with write protection */
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
35 
36 /*
37  * User lives in his very own context, and cannot reference us. Note
38  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
39  * address that the kernel will allocate out.
40  */
#define VA_BITS		44
#ifndef __ASSEMBLY__
/* Bytes needed for the full virtual page table: one 8-byte PTE
 * (hence the "+ 3" shift) per page of the VA_BITS-bit space. */
#define VPTE_SIZE	(1UL << (VA_BITS - PAGE_SHIFT + 3))
#else
/* Same value; the assembler cannot parse the UL suffix. */
#define VPTE_SIZE	(1 << (VA_BITS - PAGE_SHIFT + 3))
#endif
/* Top of user virtual space: everything at or above -VPTE_SIZE is
 * reserved (see the VPTE base placement below). */
#define TASK_SIZE	((unsigned long)-VPTE_SIZE)
48 
49 /*
50  * The vpte base must be able to hold the entire vpte, half
51  * of which lives above, and half below, the base. And it
52  * is placed as close to the highest address range as possible.
53  */
/* Center of the vpte region (half lives above, half below the base),
 * pushed as high in the address space as possible. */
#define VPTE_BASE_SPITFIRE	(-(VPTE_SIZE/2))
#if 1
/* Cheetah currently uses the same placement as Spitfire; the
 * alternative fixed base below is disabled. */
#define VPTE_BASE_CHEETAH	VPTE_BASE_SPITFIRE
#else
#define VPTE_BASE_CHEETAH	0xffe0000000000000
#endif
60 
61 #ifndef __ASSEMBLY__
62 
/* Number of user register windows buffered per thread (see
 * reg_window[]/rwbuf_stkptrs[] in struct thread_struct). */
#define NSWINS		7

/* Address-limit cookie stored in thread_struct.current_ds --
 * presumably consumed by get_fs()/set_fs(); verify in asm/uaccess.h. */
typedef struct {
	unsigned char seg;
} mm_segment_t;
68 
69 /* The Sparc processor specific thread struct. */
/* The Sparc processor specific thread struct.
 *
 * NOTE(review): the "D$ line" comments group members by data-cache
 * line, and assembly code presumably relies on these exact offsets --
 * do not reorder or repack fields.
 */
struct thread_struct {
	/* D$ line 1 */
	/* Saved kernel stack pointer of a sleeping thread; walked by
	 * thread_saved_pc() and get_wchan() below. */
	unsigned long ksp __attribute__ ((aligned(16)));
	unsigned char wstate, cwp, flags;	/* flags holds SPARC_FLAG_* bits */
	mm_segment_t current_ds;		/* user/kernel address limit */
	unsigned char w_saved, fpdepth, fault_code, use_blkcommit;
	unsigned long fault_address;		/* paired with fault_code (FAULT_CODE_* bits) */
	unsigned char fpsaved[7];		/* one entry per FPU-save depth -- TODO confirm */
	unsigned char __pad2;

	/* D$ line 2, 3, 4 */
	struct pt_regs *kregs;		/* user register frame (see KSTK_EIP/KSTK_ESP) */
	unsigned long *utraps;		/* user trap table; slot 0 is its refcount (see start_thread) */
	unsigned long gsr[7];		/* saved %gsr, one per FPU-save depth -- TODO confirm */
	unsigned long xfsr[7];		/* saved %fsr, one per FPU-save depth -- TODO confirm */

#ifdef CONFIG_DEBUG_SPINLOCK
	/* How many spinlocks held by this thread.
	 * Used with spin lock debugging to catch tasks
	 * sleeping illegally with locks held.
	 */
	int smp_lock_count;
	unsigned int smp_lock_pc;
#endif

	/* Buffered user register windows not yet flushed to the user
	 * stack, and the stack pointers they belong to -- presumably
	 * w_saved counts the valid entries; confirm against window
	 * spill/fill asm. */
	struct reg_window reg_window[NSWINS];
	unsigned long rwbuf_stkptrs[NSWINS];

	/* Performance counter state (active when SPARC_FLAG_PERFCTR
	 * is set): user-space counter addresses plus kernel shadow
	 * counter and control-register values. */
	u64 *user_cntd0, *user_cntd1;
	u64 kernel_cntd0, kernel_cntd1;
	u64 pcr_reg;
};
103 
104 #endif /* !(__ASSEMBLY__) */
105 
/* Bit values for thread_struct.flags. */
#define SPARC_FLAG_UNALIGNED    0x01    /* is allowed to do unaligned accesses	*/
#define SPARC_FLAG_NEWSIGNALS   0x02    /* task wants new-style signals		*/
#define SPARC_FLAG_32BIT        0x04    /* task is older 32-bit binary		*/
#define SPARC_FLAG_NEWCHILD     0x08    /* task is just-spawned child process	*/
#define SPARC_FLAG_PERFCTR	0x10    /* task has performance counters active	*/
#define SPARC_FLAG_ABI_PENDING	0x20    /* change of SPARC_FLAG_32BIT pending	*/
#define SPARC_FLAG_SYS_SUCCESS	0x40    /* Force successful syscall return.	*/

/* Bit values for thread_struct.fault_code. */
#define FAULT_CODE_WRITE	0x01	/* Write access, implies D-TLB		*/
#define FAULT_CODE_DTLB		0x02	/* Miss happened in D-TLB		*/
#define FAULT_CODE_ITLB		0x04	/* Miss happened in I-TLB		*/
#define FAULT_CODE_WINFIXUP	0x08	/* Miss happened during spill/fill	*/
118 
/* Static initializer for the boot task's thread_struct.  The comment
 * lines track the member order of struct thread_struct above; the
 * spinlock-debug variant additionally zero-initializes
 * smp_lock_count/smp_lock_pc in their struct position after xfsr.
 */
#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD  {					\
/* ksp, wstate, cwp, flags, current_ds, */ 		\
   0,   0,      0,   0,     KERNEL_DS,			\
/* w_saved, fpdepth, fault_code, use_blkcommit, */	\
   0,       0,       0,          0,			\
/* fault_address, fpsaved, __pad2, kregs, */		\
   0,             { 0 },   0,      0,			\
/* utraps, gsr,   xfsr, */				\
   0,	   { 0 }, { 0 },				\
/* reg_window */					\
   { { { 0, }, { 0, } }, }, 				\
/* rwbuf_stkptrs */					\
   { 0, 0, 0, 0, 0, 0, 0, },				\
/* user_cntd0, user_cntd1, kernel_cntd0, kernel_cntd1, pcr_reg */ \
   0,          0,          0,		 0,            0, \
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD  {					\
/* ksp, wstate, cwp, flags, current_ds, */ 		\
   0,   0,      0,   0,     KERNEL_DS,			\
/* w_saved, fpdepth, fault_code, use_blkcommit, */	\
   0,       0,       0,          0,			\
/* fault_address, fpsaved, __pad2, kregs, */		\
   0,             { 0 },   0,      0,			\
/* utraps, gsr,   xfsr,  smp_lock_count, smp_lock_pc, */\
   0,	   { 0 }, { 0 }, 0,		 0,		\
/* reg_window */					\
   { { { 0, }, { 0, } }, }, 				\
/* rwbuf_stkptrs */					\
   { 0, 0, 0, 0, 0, 0, 0, },				\
/* user_cntd0, user_cntd1, kernel_cntd0, kernel_cntd1, pcr_reg */ \
   0,          0,          0,		 0,            0, \
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
154 
#ifdef __KERNEL__
/* Size of the combined task_struct + kernel stack allocation:
 * two pages (16K) with 8K pages, otherwise a single page. */
#if PAGE_SHIFT == 13
#define THREAD_SIZE (2*PAGE_SIZE)
#define THREAD_SHIFT (PAGE_SHIFT + 1)
#else /* PAGE_SHIFT == 13 */
#define THREAD_SIZE PAGE_SIZE
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */
#endif /* __KERNEL__ */
164 
165 #ifndef __ASSEMBLY__
166 
/* Return saved PC of a blocked thread.
 *
 * t->ksp is the thread's saved kernel stack pointer (a biased sparc64
 * stack address).  sp[14] is the saved frame pointer (%i6) in the
 * register window at the top of the stack, and fp[15] is the saved
 * return address (%i7) one frame up -- the same frame layout
 * get_wchan() below walks via rw->ins[6]/rw->ins[7].  Both pointers
 * are checked for long alignment before being dereferenced; the
 * 0xdeadbeef sentinel is returned when no plausible PC is found.
 */
extern __inline__ unsigned long thread_saved_pc(struct thread_struct *t)
{
	unsigned long ret = 0xdeadbeefUL;

	if (t->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(t->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}
185 
/* On Uniprocessor, even in RMO processes see TSO semantics, so the
 * relaxed RMO setting is safe there; SMP must request TSO explicitly.
 */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM	TSTATE_TSO
#else
#define TSTATE_INITIAL_MM	TSTATE_RMO
#endif
192 
/* Do necessary setup to start up a newly executed 64-bit thread.
 *
 * %tstate keeps only the current-window-pointer field of the old value
 * and gets the initial memory model (TSTATE_INITIAL_MM), interrupts
 * enabled and ASI_PNF as the default ASI.  %tpc/%tnpc are derived from
 * the 4-byte-aligned entry point; the -4 bias on %tpc is presumably
 * compensated by the trap-return sequence -- NOTE(review): confirm
 * against the rtrap path.  Any user trap table inherited across exec
 * is dropped (utraps slot 0 holds its refcount: free at 1, otherwise
 * decrement).  The inline asm zeroes the 16 u_regs slots of the
 * pt_regs frame (%2 = offsetof(struct pt_regs, u_regs)), except slot
 * 14 at offset 0x70 -- the user stack pointer -- which receives sp
 * adjusted down by one register window save area and the 64-bit
 * STACK_BIAS; finally %wstate is set to match thread.wstate (1 << 3).
 */
#define start_thread(regs, pc, sp) \
do { \
	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	current->thread.wstate = (1 << 3); \
	if (current->thread.utraps) { \
		if (*(current->thread.utraps) < 2) \
			kfree (current->thread.utraps); \
		else \
			(*(current->thread.utraps))--; \
		current->thread.utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx		%%g0, [%0 + %2 + 0x00]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x08]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x10]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x18]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x20]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x28]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x30]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x38]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x40]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x48]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x50]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x58]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x60]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x68]\n\t" \
	"stx		%1,   [%0 + %2 + 0x70]\n\t" /* new user sp */ \
	"stx		%%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr		%%g0, (1 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
} while(0)
230 
/* 32-bit compat variant of start_thread.
 *
 * pc and sp are first truncated to 32 bits, and %tstate additionally
 * gets TSTATE_AM so user addresses are masked to 32 bits.  Unlike the
 * 64-bit version, the stack pointer is adjusted by a 32-bit
 * reg_window32 save area and carries no STACK_BIAS, and %wstate is
 * set to (2 << 3) instead of (1 << 3) -- presumably selecting the
 * 32-bit window spill/fill handlers; confirm against the trap table.
 * The utraps drop and the u_regs-zeroing asm mirror start_thread.
 */
#define start_thread32(regs, pc, sp) \
do { \
	pc &= 0x00000000ffffffffUL; \
	sp &= 0x00000000ffffffffUL; \
\
	regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	current->thread.wstate = (2 << 3); \
	if (current->thread.utraps) { \
		if (*(current->thread.utraps) < 2) \
			kfree (current->thread.utraps); \
		else \
			(*(current->thread.utraps))--; \
		current->thread.utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx		%%g0, [%0 + %2 + 0x00]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x08]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x10]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x18]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x20]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x28]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x30]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x38]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x40]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x48]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x50]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x58]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x60]\n\t" \
	"stx		%%g0, [%0 + %2 + 0x68]\n\t" \
	"stx		%1,   [%0 + %2 + 0x70]\n\t" /* new user sp */ \
	"stx		%%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr		%%g0, (2 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
} while(0)
270 
/* Free all resources held by a thread: nothing to do on sparc64. */
#define release_thread(tsk)		do { } while(0)

/* Spawn a kernel thread; implemented in arch code. */
extern pid_t arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/* No per-task segment descriptors to copy or release on this arch. */
#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)
278 
/* get_wchan(__TSK): best-effort "wait channel" of a sleeping task.
 *
 * Walks up to 16 saved stack frames starting from the task's saved
 * kernel stack pointer (ksp + STACK_BIAS): each frame's saved %i7
 * (rw->ins[7]) is a return address and saved %i6 (rw->ins[6]) the
 * previous frame pointer.  Returns the first PC lying outside the
 * scheduler text region, or 0 if the task is NULL, current, running,
 * or no such PC is found.  Every frame pointer is bounds-checked
 * against the task's own stack area before being dereferenced.
 */
#define get_wchan(__TSK) \
({	extern void scheduling_functions_start_here(void); \
	extern void scheduling_functions_end_here(void); \
	unsigned long pc, fp, bias = 0; \
	unsigned long task_base = (unsigned long) (__TSK); \
	struct reg_window *rw; \
        unsigned long __ret = 0; \
	int count = 0; \
	if (!(__TSK) || (__TSK) == current || \
            (__TSK)->state == TASK_RUNNING) \
		goto __out; \
	bias = STACK_BIAS; \
	fp = (__TSK)->thread.ksp + bias; \
	do { \
		/* Bogus frame pointer? */ \
		if (fp < (task_base + sizeof(struct task_struct)) || \
		    fp >= (task_base + THREAD_SIZE)) \
			break; \
		rw = (struct reg_window *) fp; \
		pc = rw->ins[7]; \
		if (pc < ((unsigned long) scheduling_functions_start_here) || \
		    pc >= ((unsigned long) scheduling_functions_end_here)) { \
			__ret = pc; \
			goto __out; \
		} \
		fp = rw->ins[6] + bias; \
	} while (++count < 16); \
__out:	__ret; \
})
308 
/* Saved user-mode PC and stack pointer of a task, read from its
 * pt_regs frame. */
#define KSTK_EIP(tsk)  ((tsk)->thread.kregs->tpc)
#define KSTK_ESP(tsk)  ((tsk)->thread.kregs->u_regs[UREG_FP])

#ifdef __KERNEL__
/* Allocation and freeing of task_struct and kernel stack: one
 * THREAD_SIZE allocation holds both -- page order 1 with 8K pages,
 * order 0 otherwise (must match THREAD_SIZE above). */
#if PAGE_SHIFT == 13
#define alloc_task_struct()   ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
#else /* PAGE_SHIFT == 13 */
#define alloc_task_struct()   ((struct task_struct *)__get_free_pages(GFP_KERNEL, 0))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),0)
#endif /* PAGE_SHIFT == 13 */
#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

/* Spin-wait hint; the per-CPU-id udelay presumably staggers CPUs to
 * reduce contention on shared cache lines -- NOTE(review): confirm. */
#define cpu_relax()	do { udelay(1 + smp_processor_id()); barrier(); } while (0)

#endif /* __KERNEL__ */
327 
328 #endif /* __KERNEL__ */
329 
330 #endif /* !(__ASSEMBLY__) */
331 
332 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
333