1/* $Id: entry.S,v 1.141 2001/12/05 23:56:32 davem Exp $
2 * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
8 */
9
10#include <linux/config.h>
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/ptrace.h>
17#include <asm/page.h>
18#include <asm/signal.h>
19#include <asm/pgtable.h>
20#include <asm/processor.h>
21#include <asm/visasm.h>
22#include <asm/estate.h>
23#include <asm/auxio.h>
24#include <asm/sfafsr.h>
25
26/* #define SYSCALL_TRACING	1 */
27
28#define curptr      g6
29
30#define NR_SYSCALLS 256      /* Each OS is different... */
31
32	.text
33	.align		32
34
35	.globl		sparc64_vpte_patchme1
36	.globl		sparc64_vpte_patchme2
37/*
38 * On a second level vpte miss, check whether the original fault is to the OBP
39 * range (note that this is only possible for instruction miss, data misses to
40 * obp range do not use vpte). If so, go back directly to the faulting address.
41 * This is because we want to read the tpc, otherwise we have no way of knowing
42 * the 8k aligned faulting address if we are using >8k kernel pagesize. This also
43 * ensures no vpte range addresses are dropped into tlb while obp is executing
44 * (see inherit_locked_prom_mappings() rant).
45 */
/* Second-level VPTE miss.  %g4 holds the faulting virtual address.
 * If it lies in [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) branch to the
 * patched OBP instruction-miss handler; otherwise fall through to
 * the boot-patched kernel pagetable walk (see header comment above).
 */
sparc64_vpte_nucleus:
	mov		0xf, %g5
	sllx		%g5, 28, %g5			! Load 0xf0000000
	cmp		%g4, %g5			! Is addr >= LOW_OBP_ADDRESS?
	blu,pn		%xcc, sparc64_vpte_patchme1
	 mov		0x1, %g5			! Delay slot: prep HI_OBP constant
	sllx		%g5, 32, %g5			! Load 0x100000000
	cmp		%g4, %g5			! Is addr < HI_OBP_ADDRESS?
	blu,pn		%xcc, obp_iaddr_patch
	 nop
sparc64_vpte_patchme1:
	sethi		%hi(0), %g5			! This has to be patched (at boot)
sparc64_vpte_patchme2:
	or		%g5, %lo(0), %g5		! This is patched too
	ba,pt		%xcc, sparc64_kpte_continue	! Part of dtlb_backend
	 add		%g1, %g1, %g1			! Finish PMD offset adjustment
62
/* VPTE miss with no entry: restore TAG_ACCESS and retire the trap.
 * NOTE(review): the [%g1 + %g1] address relies on the TAG_ACCESS
 * MMU register offset being exactly twice TLB_SFSR -- confirm
 * against the asm-sparc64 MMU register definitions.
 */
vpte_noent:
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
	done						! Slick trick
67
68	.globl		obp_iaddr_patch
69	.globl		obp_daddr_patch
70
/* Instruction-TLB miss to the OBP range.  The first two instructions
 * are patched at boot with the physical address of the OBP pmd table;
 * walk pmd -> pte by physical loads and install the PTE in the ITLB.
 * Bails to longpath if no mapping exists.
 */
obp_iaddr_patch:
	sethi		%hi(0), %g5			! This and following is patched
	or		%g5, %lo(0), %g5		! g5 now holds obp pmd base physaddr
	wrpr		%g0, 1, %tl			! Behave as if we are at TL0
	rdpr		%tpc, %g4			! Find original faulting iaddr
	srlx		%g4, 13, %g4			! Throw out context bits
	sllx		%g4, 13, %g4			! g4 has vpn + ctx0 now
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g4, [%g1 + %g1] ASI_IMMU	! Restore previous TAG_ACCESS
	srlx		%g4, 23, %g6			! Find pmd number
	and		%g6, 0x7ff, %g6			! Find pmd number
	sllx		%g6, 2, %g6			! Find pmd offset (4-byte entries)
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr
	brz,pn		%g5, longpath			! Kill the PROM ? :-)
	 sllx		%g5, 11, %g5			! Shift into place
	srlx		%g4, 13, %g6			! find pte number in pagetable
	and		%g6, 0x3ff, %g6			! find pte number in pagetable
	sllx		%g6, 3, %g6			! find pte offset (8-byte entries)
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte
	brgez,pn	%g5, longpath			! Valid bit (63) clear? bail
	 nop
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! put into tlb
	retry						! go back to original fault
94
/* Data-TLB miss to the OBP range.  Same boot-patched pmd walk as
 * obp_iaddr_patch, but the faulting address is already in %g4 and
 * the PTE is installed into the DTLB.
 */
obp_daddr_patch:
	sethi		%hi(0), %g5			! This and following is patched
	or		%g5, %lo(0), %g5		! g5 now holds obp pmd base physaddr
	srlx		%g4, 23, %g6			! Find pmd number
	and		%g6, 0x7ff, %g6			! Find pmd number
	sllx		%g6, 2, %g6			! Find pmd offset (4-byte entries)
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr
	brz,pn		%g5, longpath			! No pmd: take the long path
	 sllx		%g5, 11, %g5			! Shift into place
	srlx		%g4, 13, %g6			! find pte number in pagetable
	and		%g6, 0x3ff, %g6			! find pte number in pagetable
	sllx		%g6, 3, %g6			! find pte offset (8-byte entries)
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte
	brgez,pn	%g5, longpath			! Valid bit (63) clear? bail
	 nop
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! put into tlb
	retry
112
113/*
114 * On a first level data miss, check whether this is to the OBP range (note that
115 * such accesses can be made by prom, as well as by kernel using prom_getproperty
116 * on "address"), and if so, do not use vpte access ... rather, use information
117 * saved during inherit_prom_mappings() using 8k pagesize.
118 */
/* First-level kernel data miss (see header comment above): route
 * OBP-range addresses to obp_daddr_patch, otherwise treat as a
 * vmalloc address and load the kernel vpte directly.
 */
kvmap:
	mov		0xf, %g5
	sllx		%g5, 28, %g5			! Load 0xf0000000
	cmp		%g4, %g5			! Is addr >= LOW_OBP_ADDRESS?
	blu,pn		%xcc, vmalloc_addr
	 mov		0x1, %g5			! Delay slot: prep HI_OBP constant
	sllx		%g5, 32, %g5			! Load 0x100000000
	cmp		%g4, %g5			! Is addr < HI_OBP_ADDRESS?
	blu,pn		%xcc, obp_daddr_patch
	 nop
vmalloc_addr:						! vmalloc addr accessed
	ldxa		[%g3 + %g6] ASI_N, %g5		! Yep, load k-vpte
	brgez,pn	%g5, longpath			! Invalid (bit 63 clear)? bail
	 nop
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
135
136	/* This is trivial with the new code... */
137	.globl		do_fpdis
/* FPU-disabled trap handler.  Re-enables the FPU (FPRS_FEF) and
 * reconstitutes FP state from thread memory according to
 * thread->fpsaved: DL means the lower block (%f0-%f30) was saved,
 * DU means the upper block (%f32-%f62) was saved.  Halves that were
 * never saved are cleared with fzero/faddd/fmuld chains, interleaved
 * with the block loads to hide load latency.  Block loads go through
 * ASI_BLK_S with SECONDARY_CONTEXT temporarily forced to zero
 * (saved in %g5, restored at fpdis_exit).  %g7 carries the saved
 * %gsr (or zero), %g4 the TSTATE_PEF mask set on entry.
 */
do_fpdis:
	sethi		%hi(TSTATE_PEF), %g4					! IEU0
	rdpr		%tstate, %g5
	andcc		%g5, %g4, %g0
	be,pt		%xcc, 1f
	 nop
	rd		%fprs, %g5
	andcc		%g5, FPRS_FEF, %g0
	be,pt		%xcc, 1f
	 nop

	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	add		%g0, %g0, %g0
	ba,a,pt		%xcc, rtrap_clr_l6

1:	ldub		[%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5	! Load	Group
	wr		%g0, FPRS_FEF, %fprs					! LSU	Group+4bubbles
	andcc		%g5, FPRS_FEF, %g0					! IEU1	Group
	be,a,pt		%icc, 1f						! CTI
	 clr		%g7							! IEU0
	ldx		[%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7		! Load	Group
1:	andcc		%g5, FPRS_DL, %g0					! IEU1
	bne,pn		%icc, 2f						! CTI
	 fzero		%f0							! FPA
	andcc		%g5, FPRS_DU, %g0					! IEU1  Group
	bne,pn		%icc, 1f						! CTI
	 fzero		%f2							! FPA
	/* Neither half saved: clear the register file via fp ops. */
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	faddd		%f0, %f2, %f32
	fmuld		%f0, %f2, %f34
	faddd		%f0, %f2, %f36
	fmuld		%f0, %f2, %f38
	faddd		%f0, %f2, %f40
	fmuld		%f0, %f2, %f42
	faddd		%f0, %f2, %f44
	fmuld		%f0, %f2, %f46
	faddd		%f0, %f2, %f48
	fmuld		%f0, %f2, %f50
	faddd		%f0, %f2, %f52
	fmuld		%f0, %f2, %f54
	faddd		%f0, %f2, %f56
	fmuld		%f0, %f2, %f58
	b,pt		%xcc, fpdis_exit2
	 faddd		%f0, %f2, %f60
	/* DU saved, DL not: restore %f32-%f62, clear %f4-%f30. */
1:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, AOFF_task_fpregs + 0x80, %g1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, AOFF_task_fpregs + 0xc0, %g2
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	ldda		[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f48
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
	faddd		%f0, %f2, %f16
	fmuld		%f0, %f2, %f18
	faddd		%f0, %f2, %f20
	fmuld		%f0, %f2, %f22
	faddd		%f0, %f2, %f24
	fmuld		%f0, %f2, %f26
	faddd		%f0, %f2, %f28
	fmuld		%f0, %f2, %f30
	b,pt		%xcc, fpdis_exit
	 membar		#Sync
	/* DL saved: check DU; if both saved jump to full restore at 3. */
2:	andcc		%g5, FPRS_DU, %g0
	bne,pt		%icc, 3f
	 fzero		%f32
	/* DL saved, DU not: restore %f0-%f30, clear %f36-%f62. */
	mov		SECONDARY_CONTEXT, %g3
	fzero		%f34
	ldxa		[%g3] ASI_DMMU, %g5
	add		%g6, AOFF_task_fpregs, %g1
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	add		%g6, AOFF_task_fpregs + 0x40, %g2
	faddd		%f32, %f34, %f36
	fmuld		%f32, %f34, %f38
	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g2] ASI_BLK_S, %f16
	faddd		%f32, %f34, %f40
	fmuld		%f32, %f34, %f42
	faddd		%f32, %f34, %f44
	fmuld		%f32, %f34, %f46
	faddd		%f32, %f34, %f48
	fmuld		%f32, %f34, %f50
	faddd		%f32, %f34, %f52
	fmuld		%f32, %f34, %f54
	faddd		%f32, %f34, %f56
	fmuld		%f32, %f34, %f58
	faddd		%f32, %f34, %f60
	fmuld		%f32, %f34, %f62
	ba,pt		%xcc, fpdis_exit
	 membar		#Sync
	/* Both halves saved: block-load all four 64-byte chunks. */
3:	mov		SECONDARY_CONTEXT, %g3
	add		%g6, AOFF_task_fpregs, %g1
	ldxa		[%g3] ASI_DMMU, %g5
	mov		0x40, %g2
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	ldda		[%g1] ASI_BLK_S, %f0		! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda		[%g1 + %g2] ASI_BLK_S, %f16
	add		%g1, 0x80, %g1
	ldda		[%g1] ASI_BLK_S, %f32
	ldda		[%g1 + %g2] ASI_BLK_S, %f48
	membar		#Sync
fpdis_exit:
	stxa		%g5, [%g3] ASI_DMMU		! Restore secondary context
	membar		#Sync
fpdis_exit2:
	wr		%g7, 0, %gsr
	ldx		[%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
	rdpr		%tstate, %g3
	or		%g3, %g4, %g3		! anal...
	wrpr		%g3, %tstate
	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry
273
274	.align		32
/* Bounce an fp-other trap to the C handler do_fpother(pt_regs),
 * then return through rtrap with %l6 cleared.
 */
fp_other_bounce:
	call		do_fpother
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
280
281	.globl		do_fpother_check_fitos
282	.align		32
/* fp-other trap entry.  If the trap is a user-mode "unfinished
 * FP-op" on a FITOS instruction (with inexact traps disabled),
 * emulate it as FITOD + FDTOS via the jump tables below; any other
 * case falls into do_fptrap_after_fsr / fp_other_bounce (%g7 is
 * preloaded with fp_other_bounce - 4 for that path).
 */
do_fpother_check_fitos:
	sethi		%hi(fp_other_bounce - 4), %g7
	or		%g7, %lo(fp_other_bounce - 4), %g7

	/* NOTE: Need to preserve %g7 until we fully commit
	 *       to the fitos fixup.
	 */
	stx		%fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
	rdpr		%tstate, %g3
	andcc		%g3, TSTATE_PRIV, %g0
	bne,pn		%xcc, do_fptrap_after_fsr	! Kernel-mode: no emulation
	 nop
	ldx		[%g6 + AOFF_task_thread + AOFF_thread_xfsr], %g3
	srlx		%g3, 14, %g1			! Extract FSR.ftt field
	and		%g1, 7, %g1
	cmp		%g1, 2			! Unfinished FP-OP
	bne,pn		%xcc, do_fptrap_after_fsr
	 sethi		%hi(1 << 23), %g1	! Inexact
	andcc		%g3, %g1, %g0
	bne,pn		%xcc, do_fptrap_after_fsr
	 rdpr		%tpc, %g1
	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail
#define FITOS_MASK	0xc1f83fe0
#define FITOS_COMPARE	0x81a01880
	sethi		%hi(FITOS_MASK), %g1
	or		%g1, %lo(FITOS_MASK), %g1
	and		%g3, %g1, %g1
	sethi		%hi(FITOS_COMPARE), %g2
	or		%g2, %lo(FITOS_COMPARE), %g2
	cmp		%g1, %g2			! Is it really FITOS?
	bne,pn		%xcc, do_fptrap_after_fsr
	 nop
	std		%f62, [%g6 + AOFF_task_fpregs + (62 * 4)]	! Save scratch %f62
	sethi		%hi(fitos_table_1), %g1
	and		%g3, 0x1f, %g2		! rs2 field indexes table 1
	or		%g1, %lo(fitos_table_1),  %g1
	sllx		%g2, 2, %g2		! 4 bytes per table entry
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_continue
322
/* Jump table indexed by the FITOS rs2 field: convert the source
 * single register %fN to double in scratch %f62.  Exactly one entry
 * executes; its delay-slot companion branches to fitos_emul_continue.
 */
fitos_table_1:
	fitod		%f0, %f62
	fitod		%f1, %f62
	fitod		%f2, %f62
	fitod		%f3, %f62
	fitod		%f4, %f62
	fitod		%f5, %f62
	fitod		%f6, %f62
	fitod		%f7, %f62
	fitod		%f8, %f62
	fitod		%f9, %f62
	fitod		%f10, %f62
	fitod		%f11, %f62
	fitod		%f12, %f62
	fitod		%f13, %f62
	fitod		%f14, %f62
	fitod		%f15, %f62
	fitod		%f16, %f62
	fitod		%f17, %f62
	fitod		%f18, %f62
	fitod		%f19, %f62
	fitod		%f20, %f62
	fitod		%f21, %f62
	fitod		%f22, %f62
	fitod		%f23, %f62
	fitod		%f24, %f62
	fitod		%f25, %f62
	fitod		%f26, %f62
	fitod		%f27, %f62
	fitod		%f28, %f62
	fitod		%f29, %f62
	fitod		%f30, %f62
	fitod		%f31, %f62
356
/* Second half of FITOS emulation: dispatch on the rd field
 * (instruction bits 29:25) into fitos_table_2 to convert %f62
 * back down to the destination single register.
 */
fitos_emul_continue:
	sethi		%hi(fitos_table_2), %g1
	srl		%g3, 25, %g2		! Extract rd field
	or		%g1, %lo(fitos_table_2), %g1
	and		%g2, 0x1f, %g2
	sllx		%g2, 2, %g2		! 4 bytes per table entry
	jmpl		%g1 + %g2, %g0
	 ba,pt		%xcc, fitos_emul_fini
365
/* Jump table indexed by the FITOS rd field: round the double in
 * scratch %f62 into the destination single register %fN.
 */
fitos_table_2:
	fdtos		%f62, %f0
	fdtos		%f62, %f1
	fdtos		%f62, %f2
	fdtos		%f62, %f3
	fdtos		%f62, %f4
	fdtos		%f62, %f5
	fdtos		%f62, %f6
	fdtos		%f62, %f7
	fdtos		%f62, %f8
	fdtos		%f62, %f9
	fdtos		%f62, %f10
	fdtos		%f62, %f11
	fdtos		%f62, %f12
	fdtos		%f62, %f13
	fdtos		%f62, %f14
	fdtos		%f62, %f15
	fdtos		%f62, %f16
	fdtos		%f62, %f17
	fdtos		%f62, %f18
	fdtos		%f62, %f19
	fdtos		%f62, %f20
	fdtos		%f62, %f21
	fdtos		%f62, %f22
	fdtos		%f62, %f23
	fdtos		%f62, %f24
	fdtos		%f62, %f25
	fdtos		%f62, %f26
	fdtos		%f62, %f27
	fdtos		%f62, %f28
	fdtos		%f62, %f29
	fdtos		%f62, %f30
	fdtos		%f62, %f31
399
/* FITOS emulation complete: restore the saved scratch %f62 and
 * retire the trap, skipping the emulated instruction.
 */
fitos_emul_fini:
	ldd		[%g6 + AOFF_task_fpregs + (62 * 4)], %f62
	done
403
404	.globl		do_fptrap
405	.align		32
/* FP trap save path.  Records %fsr in thread->xfsr, merges the live
 * %fprs dirty bits into thread->fpsaved, saves %gsr, then block-
 * stores the dirty FP halves (DL: %f0-%f30 at +0x0/+0x40,
 * DU: %f32-%f62 at +0x80/+0xc0) with SECONDARY_CONTEXT forced to
 * zero (saved in %g5, restored afterwards).  Finally enters etrap
 * with the FPU disabled.
 */
do_fptrap:
	stx		%fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
do_fptrap_after_fsr:
	ldub		[%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
	rd		%fprs, %g1
	or		%g3, %g1, %g3		! Accumulate dirty bits
	stb		%g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
	rd		%gsr, %g3
	stx		%g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
	mov		SECONDARY_CONTEXT, %g3
	add		%g6, AOFF_task_fpregs, %g2
	ldxa		[%g3] ASI_DMMU, %g5	! Save secondary context
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync
	andcc		%g1, FPRS_DL, %g0
	be,pn		%icc, 4f		! Lower half clean? skip its stores
	 mov		0x40, %g3
	stda		%f0, [%g2] ASI_BLK_S
	stda		%f16, [%g2 + %g3] ASI_BLK_S
	andcc		%g1, FPRS_DU, %g0
	be,pn		%icc, 5f		! Upper half clean? skip its stores
4:       add		%g2, 128, %g2
	stda		%f32, [%g2] ASI_BLK_S
	stda		%f48, [%g2 + %g3] ASI_BLK_S
5:	mov		SECONDARY_CONTEXT, %g1
	membar		#Sync
	stxa		%g5, [%g1] ASI_DMMU	! Restore secondary context
	membar		#Sync
	ba,pt		%xcc, etrap
	 wr		%g0, 0, %fprs
436
437	/* The registers for cross calls will be:
438	 *
439	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
440	 *         [high 32-bits] MMU Context Argument 0, place in %g5
 * DATA 1: Address Argument 1, place in %g1
442	 * DATA 2: Address Argument 2, place in %g7
443	 *
444	 * With this method we can do most of the cross-call tlb/cache
445	 * flushing very quickly.
446	 *
 * Current CPU's IRQ worklist table is locked into %g6,
448	 * don't touch.
449	 */
450	.text
451	.align		32
452	.globl		do_ivec
/* Interrupt vector dispatch.  Reads incoming mondo DATA0; a value
 * >= KERNBASE is a cross-call (handled at do_ivec_xcall), otherwise
 * it indexes ivector_table (32-byte buckets): chain the bucket on
 * this cpu's per-PIL irq worklist (based at %g6) and post the
 * matching softint.  Spurious vectors (no irq_info) go to
 * do_ivec_spurious.
 */
do_ivec:
	mov		0x40, %g3			! INTR data0 register offset
	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
	sethi		%hi(KERNBASE), %g4
	cmp		%g3, %g4
	bgeu,pn		%xcc, do_ivec_xcall
	 srlx		%g3, 32, %g5			! Delay slot: high 32 bits for xcall
	stxa		%g0, [%g0] ASI_INTR_RECEIVE	! ACK the mondo
	membar		#Sync

	sethi		%hi(ivector_table), %g2
	sllx		%g3, 5, %g3			! 32 bytes per bucket
	or		%g2, %lo(ivector_table), %g2
	add		%g2, %g3, %g3
	ldx		[%g3 + 0x08], %g2	/* irq_info */
	ldub		[%g3 + 0x04], %g4	/* pil */
	brz,pn		%g2, do_ivec_spurious
	 mov		1, %g2

	sllx		%g2, %g4, %g2		! Softint bit for this pil
	sllx		%g4, 2, %g4		! Worklist slot offset (4 bytes each)
	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
	wr		%g2, 0x0, %set_softint
	retry
/* Cross-call dispatch: DATA0 low 32 bits = function address (in %g3),
 * high 32 bits already in %g5; DATA1 -> %g1, DATA2 -> %g7.  ACK the
 * mondo and jump to the handler.
 */
do_ivec_xcall:
	mov		0x50, %g1			! INTR data1 register offset

	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
	srl		%g3, 0, %g3			! Zero-extend function address
	mov		0x60, %g7			! INTR data2 register offset
	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
	stxa		%g0, [%g0] ASI_INTR_RECEIVE	! ACK the mondo
	membar		#Sync
	ba,pt		%xcc, 1f
	 nop

	.align		32
1:	jmpl		%g3, %g0
	 nop
494
/* Vector with no irq_info: record the bucket in worklist slot 0,
 * switch off interrupt globals, and report it through the C
 * handler catch_disabled_ivec().
 */
do_ivec_spurious:
	stw		%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
	rdpr		%pstate, %g5

	wrpr		%g5, PSTATE_IG | PSTATE_AG, %pstate
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	call		catch_disabled_ivec
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
507
508	.globl		save_alternate_globals
save_alternate_globals: /* %o0 = save_area */
	/* Save the three alternate global register sets (AG at +0x00,
	 * IG at +0x40, MG at +0x80) into save_area, cycling %pstate
	 * with interrupts disabled; original %pstate restored at end.
	 */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1		! Keep IE off while switching sets
	wrpr		%o1, PSTATE_AG, %pstate
	stx		%g0, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x08]
	stx		%g2, [%o0 + 0x10]
	stx		%g3, [%o0 + 0x18]
	stx		%g4, [%o0 + 0x20]
	stx		%g5, [%o0 + 0x28]
	stx		%g6, [%o0 + 0x30]
	stx		%g7, [%o0 + 0x38]
	wrpr		%o1, PSTATE_IG, %pstate
	stx		%g0, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x48]
	stx		%g2, [%o0 + 0x50]
	stx		%g3, [%o0 + 0x58]
	stx		%g4, [%o0 + 0x60]
	stx		%g5, [%o0 + 0x68]
	stx		%g6, [%o0 + 0x70]
	stx		%g7, [%o0 + 0x78]
	wrpr		%o1, PSTATE_MG, %pstate
	stx		%g0, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x88]
	stx		%g2, [%o0 + 0x90]
	stx		%g3, [%o0 + 0x98]
	stx		%g4, [%o0 + 0xa0]
	stx		%g5, [%o0 + 0xa8]
	stx		%g6, [%o0 + 0xb0]
	stx		%g7, [%o0 + 0xb8]
	wrpr		%o5, 0x0, %pstate	! Restore original %pstate
	retl
	 nop
542
543	.globl		restore_alternate_globals
restore_alternate_globals: /* %o0 = save_area */
	/* Inverse of save_alternate_globals: reload AG/IG/MG register
	 * sets from save_area (same +0x00/+0x40/+0x80 layout).
	 */
	rdpr		%pstate, %o5
	andn		%o5, PSTATE_IE, %o1		! Keep IE off while switching sets
	wrpr		%o1, PSTATE_AG, %pstate
	ldx		[%o0 + 0x00], %g0
	ldx		[%o0 + 0x08], %g1
	ldx		[%o0 + 0x10], %g2
	ldx		[%o0 + 0x18], %g3
	ldx		[%o0 + 0x20], %g4
	ldx		[%o0 + 0x28], %g5
	ldx		[%o0 + 0x30], %g6
	ldx		[%o0 + 0x38], %g7
	wrpr		%o1, PSTATE_IG, %pstate
	ldx		[%o0 + 0x40], %g0
	ldx		[%o0 + 0x48], %g1
	ldx		[%o0 + 0x50], %g2
	ldx		[%o0 + 0x58], %g3
	ldx		[%o0 + 0x60], %g4
	ldx		[%o0 + 0x68], %g5
	ldx		[%o0 + 0x70], %g6
	ldx		[%o0 + 0x78], %g7
	wrpr		%o1, PSTATE_MG, %pstate
	ldx		[%o0 + 0x80], %g0
	ldx		[%o0 + 0x88], %g1
	ldx		[%o0 + 0x90], %g2
	ldx		[%o0 + 0x98], %g3
	ldx		[%o0 + 0xa0], %g4
	ldx		[%o0 + 0xa8], %g5
	ldx		[%o0 + 0xb0], %g6
	ldx		[%o0 + 0xb8], %g7
	wrpr		%o5, 0x0, %pstate	! Restore original %pstate
	retl
	 nop
577
578	.globl		getcc, setcc
/* getcc(%o0 = pt_regs): extract the icc/xcc condition codes
 * (TSTATE bits 35:32) and store them into regs->u_regs[G1].
 */
getcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	srlx		%o1, 32, %o1
	and		%o1, 0xf, %o1
	retl
	 stx		%o1, [%o0 + PT_V9_G1]
/* setcc(%o0 = pt_regs): replace the TSTATE_ICC field of
 * regs->tstate with the value from regs->u_regs[G1].
 */
setcc:
	ldx		[%o0 + PT_V9_TSTATE], %o1
	ldx		[%o0 + PT_V9_G1], %o2
	or		%g0, %ulo(TSTATE_ICC), %o3
	sllx		%o3, 32, %o3		! Build the ICC field mask
	andn		%o1, %o3, %o1		! Clear old condition codes
	sllx		%o2, 32, %o2
	and		%o2, %o3, %o2		! Keep only new condition codes
	or		%o1, %o2, %o1
	retl
	 stx		%o1, [%o0 + PT_V9_TSTATE]
596
597	.globl		utrap, utrap_ill
/* User trap dispatch: %g1 = registered user handler address, or zero
 * (no handler -> take normal etrap path).  Opens a fresh window,
 * pins CWP into TSTATE, and redirects %tnpc at the user handler so
 * `done` resumes there; old tpc/tnpc are left in %l6/%l7.
 */
utrap:	brz,pn		%g1, etrap
	 nop
	save		%sp, -128, %sp
	rdpr		%tstate, %l6
	rdpr		%cwp, %l7
	andn		%l6, TSTATE_CWP, %l6
	wrpr		%l6, %l7, %tstate	! TSTATE.CWP = current window
	rdpr		%tpc, %l6
	rdpr		%tnpc, %l7
	wrpr		%g1, 0, %tnpc		! Resume at the user handler
	done
/* Illegal utrap: hand off to C bad_trap() and return via rtrap. */
utrap_ill:
        call		bad_trap
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
614
615#ifdef CONFIG_BLK_DEV_FD
616	.globl		floppy_hardint
/* Floppy pseudo-DMA hard interrupt.  While doing_pdma is set, shovel
 * bytes between the FDC FIFO (physical address in fdc_status, via
 * ASI_PHYS_BYPASS_EC_E) and the buffer at pdma_vaddr, decrementing
 * pdma_size.  Register usage: %g3 = fdc_status physaddr,
 * %g4 = pdma_vaddr cursor, %g5 = bytes remaining, %g7 = scratch.
 */
floppy_hardint:
	wr		%g0, (1 << 11), %clear_softint
	sethi		%hi(doing_pdma), %g1
	ld		[%g1 + %lo(doing_pdma)], %g2
	brz,pn		%g2, floppy_dosoftint		! Not in pseudo-DMA: softirq path
	 sethi		%hi(fdc_status), %g3
	ldx		[%g3 + %lo(fdc_status)], %g3
	sethi		%hi(pdma_vaddr), %g5
	ldx		[%g5 + %lo(pdma_vaddr)], %g4
	sethi		%hi(pdma_size), %g5
	ldx		[%g5 + %lo(pdma_size)], %g5

/* Main transfer loop: test status bits 0x80/0x20/0x40 to decide
 * fifo-empty / overrun / direction, then move one byte.
 */
next_byte:
	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	andcc		%g7, 0x80, %g0
	be,pn		%icc, floppy_fifo_emptied
	 andcc		%g7, 0x20, %g0
	be,pn		%icc, floppy_overrun
	 andcc		%g7, 0x40, %g0
	be,pn		%icc, floppy_write
	 sub		%g5, 1, %g5

	/* Read direction: FIFO data register is at fdc_status + 1. */
	inc		%g3
	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	dec		%g3
	orcc		%g0, %g5, %g0		! Any bytes left?
	stb		%g7, [%g4]
	bne,pn		%xcc, next_byte
	 add		%g4, 1, %g4

	b,pt		%xcc, floppy_tdone
	 nop

/* Write direction: buffer byte out to the FIFO data register. */
floppy_write:
	ldub		[%g4], %g7
	orcc		%g0, %g5, %g0		! Any bytes left?
	inc		%g3
	stba		%g7, [%g3] ASI_PHYS_BYPASS_EC_E
	dec		%g3
	bne,pn		%xcc, next_byte
	 add		%g4, 1, %g4

/* Transfer complete: write back cursors, pulse AUX1 FTCNT in the
 * auxio register, and clear doing_pdma before the softint path.
 */
floppy_tdone:
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(auxio_register), %g1
	ldx		[%g1 + %lo(auxio_register)], %g7
	lduba		[%g7] ASI_PHYS_BYPASS_EC_E, %g5
	or		%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	andn		%g5, AUXIO_AUX1_FTCNT, %g5
/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */

	/* Delay between the two auxio writes. */
	nop; nop;  nop; nop;  nop; nop;
	nop; nop;  nop; nop;  nop; nop;

	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
	sethi		%hi(doing_pdma), %g1
	b,pt		%xcc, floppy_dosoftint
	 st		%g0, [%g1 + %lo(doing_pdma)]

/* FIFO drained mid-transfer: save cursors and idle the interrupt
 * by writing ICLR_IDLE to the bucket's iclr register, then retry.
 */
floppy_fifo_emptied:
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(irq_action), %g1
	or		%g1, %lo(irq_action), %g1
	ldx		[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
	ldx		[%g3 + 0x08], %g4		! action->flags>>48==ino
	sethi		%hi(ivector_table), %g3
	srlx		%g4, 48, %g4
	or		%g3, %lo(ivector_table), %g3
	sllx		%g4, 5, %g4			! 32 bytes per bucket
	ldx		[%g3 + %g4], %g4		! &ivector_table[ino]
	ldx		[%g4 + 0x10], %g4		! bucket->iclr
	stwa		%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
	membar		#Sync				! probably not needed...
	retry

/* Overrun: save cursors, stop pseudo-DMA, report via softint path. */
floppy_overrun:
	sethi		%hi(pdma_vaddr), %g1
	stx		%g4, [%g1 + %lo(pdma_vaddr)]
	sethi		%hi(pdma_size), %g1
	stx		%g5, [%g1 + %lo(pdma_size)]
	sethi		%hi(doing_pdma), %g1
	st		%g0, [%g1 + %lo(doing_pdma)]

/* Raise PIL and enter the C handler sparc_floppy_irq(11, 0, regs). */
floppy_dosoftint:
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7

	mov		11, %o0
	mov		0, %o1
	call		sparc_floppy_irq
	 add		%sp, PTREGS_OFF, %o2

	b,pt		%xcc, rtrap
	 clr		%l6
723#endif /* CONFIG_BLK_DEV_FD */
724
725	/* XXX Here is stuff we still need to write... -DaveM XXX */
726	.globl		netbsd_syscall
/* Unimplemented (see note above): currently a no-op that just
 * returns to the caller.
 */
netbsd_syscall:
	retl
	 nop
730
731	/* We need to carefully read the error status, ACK
732	 * the errors, prevent recursive traps, and pass the
733	 * information on to C code for logging.
734	 *
735	 * We pass the AFAR in as-is, and we encode the status
736	 * information as described in asm-sparc64/sfafsr.h
737	 */
738	.globl		__spitfire_access_error
__spitfire_access_error:
	/* Disable ESTATE error reporting so that we do not
	 * take recursive traps and RED state the processor.
	 */
	stxa		%g0, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	mov		UDBE_UE, %g1		! Which UDB sticky bit to clear
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR

	/* __spitfire_cee_trap branches here with AFSR in %g4 and
	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the
	 * ESTATE Error Enable register.
	 */
__spitfire_cee_trap_continue:
	ldxa		[%g0] ASI_AFAR, %g5	! Get AFAR

	/* Encode trap type and TL>1 flag into the status word (%g4)
	 * as described in asm-sparc64/sfafsr.h.
	 */
	rdpr		%tt, %g3
	and		%g3, 0x1ff, %g3		! Paranoia
	sllx		%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
	or		%g4, %g3, %g4
	rdpr		%tl, %g3
	cmp		%g3, 1
	mov		1, %g3
	bleu		%xcc, 1f
	 sllx		%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

	or		%g4, %g3, %g4

	/* Read in the UDB error register state, clearing the
	 * sticky error bits as-needed.  We only clear them if
	 * the UE bit is set.  Likewise, __spitfire_cee_trap
	 * below will only do so if the CE bit is set.
	 *
	 * NOTE: UltraSparc-I/II have high and low UDB error
	 *       registers, corresponding to the two UDB units
	 *       present on those chips.  UltraSparc-IIi only
	 *       has a single UDB, called "SDB" in the manual.
	 *       For IIi the upper UDB register always reads
	 *       as zero so for our purposes things will just
	 *       work with the checks below.
	 */
1:	ldxa		[%g0] ASI_UDBH_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBH_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	stxa		%g3, [%g0] ASI_UDB_ERROR_W	! Clear UDB-high sticky bits
	membar		#Sync

1:	mov		0x18, %g3			! UDB-low register offset
	ldxa		[%g3] ASI_UDBL_ERROR_R, %g3
	and		%g3, 0x3ff, %g7		! Paranoia
	sllx		%g7, SFSTAT_UDBL_SHIFT, %g7
	or		%g4, %g7, %g4
	andcc		%g3, %g1, %g3		! UDBE_UE or UDBE_CE
	be,pn		%xcc, 1f
	 nop
	mov		0x18, %g7
	stxa		%g3, [%g7] ASI_UDB_ERROR_W	! Clear UDB-low sticky bits
	membar		#Sync

1:	/* Ok, now that we've latched the error state,
	 * clear the sticky bits in the AFSR.
	 */
	stxa		%g4, [%g0] ASI_AFSR
	membar		#Sync

	/* TL>1 goes through etraptl1, TL==1 through etrap_irq;
	 * both paths raise PIL to 15 first.
	 */
	rdpr		%tl, %g2
	cmp		%g2, 1
	rdpr		%pil, %g2
	bleu,pt		%xcc, 1f
	 wrpr		%g0, 15, %pil

	ba,pt		%xcc, etraptl1
	 rd		%pc, %g7

	ba,pt		%xcc, 2f
	 nop

1:	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7

2:	mov		%l4, %o1		! arg1 = encoded status (SFSR word)
	mov		%l5, %o2		! arg2 = AFAR
	call		spitfire_access_error
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
830
831	/* This is the trap handler entry point for ECC correctable
832	 * errors.  They are corrected, but we listen for the trap
833	 * so that the event can be logged.
834	 *
835	 * Disrupting errors are either:
836	 * 1) single-bit ECC errors during UDB reads to system
837	 *    memory
838	 * 2) data parity errors during write-back events
839	 *
840	 * As far as I can make out from the manual, the CEE trap
841	 * is only for correctable errors during memory read
842	 * accesses by the front-end of the processor.
843	 *
844	 * The code below is only for trap level 1 CEE events,
845	 * as it is the only situation where we can safely record
846	 * and log.  For trap level >1 we just clear the CE bit
847	 * in the AFSR and return.
848	 *
	 * This is just like __spitfire_access_error above, but it
850	 * specifically handles correctable errors.  If an
851	 * uncorrectable error is indicated in the AFSR we
852	 * will branch directly above to __spitfire_access_error
853	 * to handle it instead.  Uncorrectable therefore takes
854	 * priority over correctable, and the error logging
855	 * C code will notice this case by inspecting the
856	 * trap type.
857	 */
858	.globl		__spitfire_cee_trap
__spitfire_cee_trap:
	ldxa		[%g0] ASI_AFSR, %g4	! Get AFSR
	mov		1, %g3
	sllx		%g3, SFAFSR_UE_SHIFT, %g3
	andcc		%g4, %g3, %g0		! Check for UE
	bne,pn		%xcc, __spitfire_access_error	! UE present: escalate
	 nop

	/* Ok, in this case we only have a correctable error.
	 * Indicate we only wish to capture that state in register
	 * %g1, and we only disable CE error reporting unlike UE
	 * handling which disables all errors.
	 */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g3
	andn		%g3, ESTATE_ERR_CE, %g3
	stxa		%g3, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
	ba,pt		%xcc, __spitfire_cee_trap_continue
	 mov		UDBE_CE, %g1
880
881	.globl		__spitfire_data_access_exception
882	.globl		__spitfire_data_access_exception_tl1
/* Data access exception taken at TL>1.  Captures SFSR/SFAR, clears
 * the fault-valid bit, and either fixes up window spill/fill traps
 * via winfix_dax or reports through the C tl1 handler.
 */
__spitfire_data_access_exception_tl1:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	rdpr		%tt, %g3
	cmp		%g3, 0x80		! first win spill/fill trap
	blu,pn		%xcc, 1f
	 cmp		%g3, 0xff		! last win spill/fill trap
	bgu,pn		%xcc, 1f
	 nop
	ba,pt		%xcc, winfix_dax	! Fault inside window handlers
	 rdpr		%tpc, %g3
1:	sethi		%hi(109f), %g7
	ba,pt		%xcc, etraptl1
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! arg1 = SFSR
	mov		%l5, %o2		! arg2 = SFAR
	call		spitfire_data_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
909
/* Data access exception at TL==1: capture SFSR/SFAR, clear the
 * fault-valid bit, and report via the C handler.
 */
__spitfire_data_access_exception:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g5
	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! arg1 = SFSR
	mov		%l5, %o2		! arg2 = SFAR
	call		spitfire_data_access_exception
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
928
929	.globl		__spitfire_insn_access_exception
930	.globl		__spitfire_insn_access_exception_tl1
/* Instruction access exception at TL>1: capture IMMU SFSR (TPC
 * stands in for the missing SFAR), clear fault-valid, report via
 * the C tl1 handler.
 */
__spitfire_insn_access_exception_tl1:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	ldxa		[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr		%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etraptl1
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! arg1 = SFSR
	mov		%l5, %o2		! arg2 = faulting PC
	call		spitfire_insn_access_exception_tl1
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
948
/* Instruction access exception at TL==1: same capture as the tl1
 * variant but reports through etrap and the non-tl1 C handler.
 */
__spitfire_insn_access_exception:
	rdpr		%pstate, %g4
	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov		TLB_SFSR, %g3
	ldxa		[%g3] ASI_IMMU, %g4	! Get SFSR
	rdpr		%tpc, %g5		! IMMU has no SFAR, use TPC
	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7
	mov		%l4, %o1		! arg1 = SFSR
	mov		%l5, %o2		! arg2 = faulting PC
	call		spitfire_insn_access_exception
	 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
	ba,pt		%xcc, rtrap
	 clr		%l6
966
967	/* Capture I/D/E-cache state into per-cpu error scoreboard.
968	 *
969	 * %g1:		(TL>=0) ? 1 : 0
970	 * %g2:		scratch
971	 * %g3:		scratch
972	 * %g4:		AFSR
973	 * %g5:		AFAR
974	 * %g6:		current thread ptr
975	 * %g7:		scratch
976	 */
/* NOTE: only C-style comments are safe inside this cpp macro; a '!'
 * comment would swallow the rest of the continuation-joined line.
 * See the register-contract comment above for %g1..%g7 usage.
 * Flow: tag the AFSR with the TL1 bit, find this cpu's log slot
 * (Safari vs JBUS config read), bail to 80f if the slot is absent
 * or already used, then snapshot D-cache, I-cache and E-cache state
 * around the AFAR.
 */
#define CHEETAH_LOG_ERROR						\
	/* Put "TL1" software bit into AFSR. */				\
	and		%g1, 0x1, %g1;					\
	sllx		%g1, 63, %g2;					\
	or		%g4, %g2, %g4;					\
	/* Get log entry pointer for this cpu at this trap level. */	\
	BRANCH_IF_JALAPENO(g2,g3,50f)					\
	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	ba,pt		%xcc, 60f; 					\
	 and		%g2, 0x3ff, %g2;				\
50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2;			\
	srlx		%g2, 17, %g2;					\
	and		%g2, 0x1f, %g2;					\
60:	sllx		%g2, 9, %g2;					\
	sethi		%hi(cheetah_error_log), %g3;			\
	ldx		[%g3 + %lo(cheetah_error_log)], %g3;		\
	brz,pn		%g3, 80f;					\
	 nop;								\
	add		%g3, %g2, %g3;					\
	sllx		%g1, 8, %g1;					\
	add		%g3, %g1, %g1;					\
	/* %g1 holds pointer to the top of the logging scoreboard */	\
	ldx		[%g1 + 0x0], %g7;				\
	cmp		%g7, -1;					\
	bne,pn		%xcc, 80f;					\
	 nop;								\
	stx		%g4, [%g1 + 0x0];				\
	stx		%g5, [%g1 + 0x8];				\
	add		%g1, 0x10, %g1;					\
	/* %g1 now points to D-cache logging area */			\
	set		0x3ff8, %g2;	/* DC_addr mask		*/	\
	and		%g5, %g2, %g2;	/* DC_addr bits of AFAR	*/	\
	srlx		%g5, 12, %g3;					\
	or		%g3, 1, %g3;	/* PHYS tag + valid	*/	\
10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7;			\
	cmp		%g3, %g7;	/* TAG match?		*/	\
	bne,pt		%xcc, 13f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x20];				\
	stx		%g7, [%g1 + 0x28];				\
	/* A membar Sync is required before and after utag access. */	\
	membar		#Sync;						\
	ldxa		[%g2] ASI_DCACHE_UTAG, %g7;			\
	membar		#Sync;						\
	stx		%g7, [%g1 + 0x30];				\
	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7;		\
	stx		%g7, [%g1 + 0x38];				\
	clr		%g3;						\
12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7;		\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 5), %g3;				\
	cmp		%g3, (4 << 5);					\
	bl,pt		%xcc, 12b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 20f;					\
	 add		%g1, 0x20, %g1;					\
13:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;					\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;						\
	bl,pt		%xcc, 10b;					\
	 nop;								\
	add		%g1, 0x40, %g1;					\
20:	/* %g1 now points to I-cache logging area */			\
	set		0x1fe0, %g2;	/* IC_addr mask		*/	\
	and		%g5, %g2, %g2;	/* IC_addr bits of AFAR	*/	\
	sllx		%g2, 1, %g2;	/* IC_addr[13:6]==VA[12:5] */	\
	srlx		%g5, (13 - 8), %g3; /* Make PTAG */		\
	andn		%g3, 0xff, %g3;	/* Mask off undefined bits */	\
21:	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	andn		%g7, 0xff, %g7;					\
	cmp		%g3, %g7;					\
	bne,pt		%xcc, 23f;					\
	 nop;								\
	/* Yep, what we want, capture state. */				\
	stx		%g2, [%g1 + 0x40];				\
	stx		%g7, [%g1 + 0x48];				\
	add		%g2, (1 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x50];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	add		%g2, (1 << 3), %g2;				\
	stx		%g7, [%g1 + 0x60];				\
	ldxa		[%g2] ASI_IC_TAG, %g7;				\
	stx		%g7, [%g1 + 0x68];				\
	sub		%g2, (3 << 3), %g2;				\
	ldxa		[%g2] ASI_IC_STAG, %g7;				\
	stx		%g7, [%g1 + 0x58];				\
	clr		%g3;						\
	srlx		%g2, 2, %g2;					\
22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7;			\
	stx		%g7, [%g1];					\
	add		%g3, (1 << 3), %g3;				\
	cmp		%g3, (8 << 3);					\
	bl,pt		%xcc, 22b;					\
	 add		%g1, 0x8, %g1;					\
	ba,pt		%xcc, 30f;					\
	 add		%g1, 0x30, %g1;					\
23:	sethi		%hi(1 << 14), %g7;				\
	add		%g2, %g7, %g2;					\
	srlx		%g2, 14, %g7;					\
	cmp		%g7, 4;						\
	bl,pt		%xcc, 21b;					\
	 nop;								\
	add		%g1, 0x70, %g1;					\
30:	/* %g1 now points to E-cache logging area */			\
	andn		%g5, (32 - 1), %g2;	/* E-cache subblock */	\
	stx		%g2, [%g1 + 0x20];				\
	ldxa		[%g2] ASI_EC_TAG_DATA, %g7;			\
	stx		%g7, [%g1 + 0x28];				\
	ldxa		[%g2] ASI_EC_R, %g0;				\
	clr		%g3;						\
31:	ldxa		[%g3] ASI_EC_DATA, %g7;				\
	stx		%g7, [%g1 + %g3];				\
	add		%g3, 0x8, %g3;					\
	cmp		%g3, 0x20;					\
	bl,pt		%xcc, 31b;					\
	 nop;								\
80:	/* DONE */
1099
	/* These get patched into the trap table at boot time
	 * once we know we have a cheetah processor.
	 */
	.globl		cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1

	/* Fast-ECC error trap entry.  Turn off both the D-cache and the
	 * I-cache in the DCU control register so corrupted cache lines are
	 * neither referenced nor replaced before they can be logged, then
	 * jump to the common handler with %g1 = 0 (TL==0) or 1 (TL>=1).
	 * membar #Sync brackets the DCU control register update.
	 */
cheetah_fecc_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1	! Clear D-cache and I-cache enables
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		0, %g1				! Delay slot: trapped at TL==0
cheetah_fecc_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_DC | DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_fast_ecc), %g2
	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
	 mov		1, %g1				! Delay slot: trapped at TL>=1
	.globl	cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1

	/* Correctable-ECC error trap entry.  Only the I-cache is disabled
	 * here (unlike the fast-ECC path, the D-cache enable DCU_DC is left
	 * alone); %g1 flags whether the trap came in at TL>=1.
	 */
cheetah_cee_trap_vector:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1		! Clear I-cache enable only
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		0, %g1				! Delay slot: trapped at TL==0
cheetah_cee_trap_vector_tl1:
	membar		#Sync
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	andn		%g1, DCU_IC, %g1
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	sethi		%hi(cheetah_cee), %g2
	jmpl		%g2 + %lo(cheetah_cee), %g0
	 mov		1, %g1				! Delay slot: trapped at TL>=1
1141	.globl	cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
1142cheetah_deferred_trap_vector:
1143	membar		#Sync
1144	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
1145	andn		%g1, DCU_DC | DCU_IC, %g1;
1146	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
1147	membar		#Sync;
1148	sethi		%hi(cheetah_deferred_trap), %g2
1149	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
1150	 mov		0, %g1
1151cheetah_deferred_trap_vector_tl1:
1152	membar		#Sync;
1153	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
1154	andn		%g1, DCU_DC | DCU_IC, %g1;
1155	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
1156	membar		#Sync;
1157	sethi		%hi(cheetah_deferred_trap), %g2
1158	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
1159	 mov		1, %g1
1160
	/* Cheetah+ specific traps. These are for the new I/D cache parity
	 * error traps.  The first argument to cheetah_plus_parity_handler
	 * is encoded as follows:
	 *
	 * Bit0:	0=dcache,1=icache
	 * Bit1:	0=recoverable,1=unrecoverable
	 */
	.globl		cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1

	/* D-cache parity error, TL==0 entry: branch out of the patched
	 * trap-table slot to the etrap-based handler below.  The trailing
	 * nops pad out the slot. */
cheetah_plus_dcpe_trap_vector:
	membar		#Sync
	sethi		%hi(do_cheetah_plus_data_parity), %g7
	jmpl		%g7 + %lo(do_cheetah_plus_data_parity), %g0
	 nop
	nop
	nop
	nop
	nop
1178
do_cheetah_plus_data_parity:
	/* Build a full trap frame and call the C-level parity handler with
	 * arg0 = 0x0 (Bit0=0: dcache, Bit1=0: recoverable) and
	 * arg1 = pt_regs, then return to userland/kernel via rtrap. */
	ba,pt		%xcc, etrap
	 rd		%pc, %g7		! Delay slot: %g7 = pc for etrap return
	mov		0x0, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1	! Delay slot: arg1 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1187
cheetah_plus_dcpe_trap_vector_tl1:
	/* D-cache parity error taken at TL>=1: switch onto the interrupt
	 * globals (PSTATE_IG) and go to the TL1 recovery path; nops pad
	 * the trap-table slot. */
	membar		#Sync
	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
	sethi		%hi(do_dcpe_tl1), %g3
	jmpl		%g3 + %lo(do_dcpe_tl1), %g0
	 nop
	nop
	nop
	nop
1197
	.globl		cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1

	/* I-cache parity error, TL==0 entry: branch out of the patched
	 * trap-table slot to the etrap-based handler below. */
cheetah_plus_icpe_trap_vector:
	membar		#Sync
	sethi		%hi(do_cheetah_plus_insn_parity), %g7
	jmpl		%g7 + %lo(do_cheetah_plus_insn_parity), %g0
	 nop
	nop
	nop
	nop
	nop
1208
do_cheetah_plus_insn_parity:
	/* Build a full trap frame and call the C-level parity handler with
	 * arg0 = 0x1 (Bit0=1: icache, Bit1=0: recoverable) and
	 * arg1 = pt_regs. */
	ba,pt		%xcc, etrap
	 rd		%pc, %g7		! Delay slot: %g7 = pc for etrap return
	mov		0x1, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1	! Delay slot: arg1 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1217
cheetah_plus_icpe_trap_vector_tl1:
	/* I-cache parity error taken at TL>=1: switch onto the interrupt
	 * globals (PSTATE_IG) and go to the TL1 recovery path. */
	membar		#Sync
	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
	sethi		%hi(do_icpe_tl1), %g3
	jmpl		%g3 + %lo(do_icpe_tl1), %g0
	 nop
	nop
	nop
	nop
1227
	/* If we take one of these traps when tl >= 1, then we
	 * jump to interrupt globals.  If some trap level above us
	 * was also using interrupt globals, we cannot recover.
	 * We may use all interrupt global registers except %g6.
	 */
	.globl		do_dcpe_tl1, do_icpe_tl1
do_dcpe_tl1:
	/* Walk every trap level from 1..TL and inspect its saved TSTATE;
	 * if any outer level was already on the interrupt globals
	 * (TSTATE_IG set) we would clobber its registers -> fatal path. */
	rdpr		%tl, %g1		! Save original trap level
	mov		1, %g2			! Setup TSTATE checking loop
	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
1:	wrpr		%g2, %tl		! Set trap level to check
	rdpr		%tstate, %g4		! Read TSTATE for this level
	andcc		%g4, %g3, %g0		! Interrupt globals in use?
	bne,a,pn	%xcc, do_dcpe_tl1_fatal	! Yep, irrecoverable
	 wrpr		%g1, %tl		! Restore original trap level
	add		%g2, 1, %g2		! Next trap level
	cmp		%g2, %g1		! Hit them all yet?
	ble,pt		%icc, 1b		! Not yet
	 nop
	wrpr		%g1, %tl		! Restore original trap level
do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	/* Reset D-cache parity: for every cacheline (outer loop, high to
	 * low) rewrite its utag and zero all four 64-bit data words
	 * (inner loop), with membar #Sync around each diagnostic access. */
	sethi		%hi(1 << 16), %g1	! D-cache size
	mov		(1 << 5), %g2		! D-cache line size
	sub		%g1, %g2, %g1		! Move down 1 cacheline
1:	srl		%g1, 14, %g3		! Compute UTAG
	membar		#Sync
	stxa		%g3, [%g1] ASI_DCACHE_UTAG
	membar		#Sync
	sub		%g2, 8, %g3		! 64-bit data word within line
2:	membar		#Sync
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_DATA
	membar		#Sync
	subcc		%g3, 8, %g3		! Next 64-bit data word
	bge,pt		%icc, 2b
	 nop
	subcc		%g1, %g2, %g1		! Next cacheline
	bge,pt		%icc, 1b
	 nop
	ba,pt		%xcc, dcpe_icpe_tl1_common
	 nop
1269
do_dcpe_tl1_fatal:
	/* Unrecoverable D-cache parity error: build a TL1 trap frame and
	 * report with arg0 = 0x2 (Bit0=0: dcache, Bit1=1: unrecoverable). */
	sethi		%hi(1f), %g7
	ba,pt		%xcc, etraptl1
1:	or		%g7, %lo(1b), %g7	! %g7 = pc for etraptl1 return
	mov		0x2, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1	! Delay slot: arg1 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1279
do_icpe_tl1:
	/* Same TSTATE_IG scan as do_dcpe_tl1: any outer trap level already
	 * on the interrupt globals makes this error irrecoverable. */
	rdpr		%tl, %g1		! Save original trap level
	mov		1, %g2			! Setup TSTATE checking loop
	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
1:	wrpr		%g2, %tl		! Set trap level to check
	rdpr		%tstate, %g4		! Read TSTATE for this level
	andcc		%g4, %g3, %g0		! Interrupt globals in use?
	bne,a,pn	%xcc, do_icpe_tl1_fatal	! Yep, irrecoverable
	 wrpr		%g1, %tl		! Restore original trap level
	add		%g2, 1, %g2		! Next trap level
	cmp		%g2, %g1		! Hit them all yet?
	ble,pt		%icc, 1b		! Not yet
	 nop
	wrpr		%g1, %tl		! Restore original trap level
do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
	/* Flush I-cache: invalidate the tag of every line via the
	 * ASI_IC_TAG diagnostic ASI ((2 << 3) selects the tag field). */
	sethi		%hi(1 << 15), %g1	! I-cache size
	mov		(1 << 5), %g2		! I-cache line size
	sub		%g1, %g2, %g1
1:	or		%g1, (2 << 3), %g3
	stxa		%g0, [%g3] ASI_IC_TAG
	membar		#Sync
	subcc		%g1, %g2, %g1
	bge,pt		%icc, 1b
	 nop
	ba,pt		%xcc, dcpe_icpe_tl1_common
	 nop
1307
do_icpe_tl1_fatal:
	/* Unrecoverable I-cache parity error: build a TL1 trap frame and
	 * report with arg0 = 0x3 (Bit0=1: icache, Bit1=1: unrecoverable). */
	sethi		%hi(1f), %g7
	ba,pt		%xcc, etraptl1
1:	or		%g7, %lo(1b), %g7	! %g7 = pc for etraptl1 return
	mov		0x3, %o0
	call		cheetah_plus_parity_error
	 add		%sp, PTREGS_OFF, %o1	! Delay slot: arg1 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1317
dcpe_icpe_tl1_common:
	/* Flush D-cache, re-enable D/I caches in DCU and finally
	 * retry the trapping instruction.
	 */
	sethi		%hi(1 << 16), %g1	! D-cache size
	mov		(1 << 5), %g2		! D-cache line size
	sub		%g1, %g2, %g1
1:	stxa		%g0, [%g1] ASI_DCACHE_TAG	! Invalidate line's tag
	membar		#Sync
	subcc		%g1, %g2, %g1		! Next cacheline (downwards)
	bge,pt		%icc, 1b
	 nop
	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
	or		%g1, (DCU_DC | DCU_IC), %g1	! Re-enable both caches
	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync
	retry					! Re-execute trapping instruction
1335
	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
	 * in the trap table.  That code has done a memory barrier
	 * and has disabled both the I-cache and D-cache in the DCU
	 * control register.  The I-cache is disabled so that we may
	 * capture the corrupted cache line, and the D-cache is disabled
	 * because corrupt data may have been placed there and we don't
	 * want to reference it.
	 *
	 * %g1 is one if this trap occured at %tl >= 1.
	 *
	 * Next, we turn off error reporting so that we don't recurse.
	 */
	.globl		cheetah_fast_ecc
cheetah_fast_ecc:
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4	! %g4 = AFSR
	ldxa		[%g0] ASI_AFAR, %g5	! %g5 = AFAR
	stxa		%g4, [%g0] ASI_AFSR	! Writing AFSR back clears its bits
	membar		#Sync

	CHEETAH_LOG_ERROR			! Capture D/I/E-cache state (uses %g4/%g5)

	/* Raise PIL to 15 and build an irq trap frame, then call the
	 * C handler as cheetah_fecc_handler(regs, afsr, afar). */
	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7		! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): %l4/%l5 presumably hold AFSR/AFAR
	mov		%l5, %o2		!   saved across etrap_irq from %g4/%g5 -- confirm in etrap
	call		cheetah_fecc_handler
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,a,pt		%xcc, rtrap_clr_l6
1372
	/* Our caller has disabled I-cache and performed membar Sync. */
	.globl		cheetah_cee
cheetah_cee:
	/* Correctable-ECC handler.  Disable only correctable-error
	 * reporting (CEEN) to avoid recursion, capture and clear
	 * AFSR/AFAR, log the cache state, then call the C handler. */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4	! %g4 = AFSR
	ldxa		[%g0] ASI_AFAR, %g5	! %g5 = AFAR
	stxa		%g4, [%g0] ASI_AFSR	! Writing AFSR back clears its bits
	membar		#Sync

	CHEETAH_LOG_ERROR			! Capture D/I/E-cache state (uses %g4/%g5)

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7		! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): %l4/%l5 presumably hold AFSR/AFAR
	mov		%l5, %o2		!   saved across etrap_irq from %g4/%g5 -- confirm in etrap
	call		cheetah_cee_handler
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,a,pt		%xcc, rtrap_clr_l6
1398
	/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
	.globl		cheetah_deferred_trap
cheetah_deferred_trap:
	/* Deferred async error handler.  Disable both correctable and
	 * uncorrectable error reporting to avoid recursion, capture and
	 * clear AFSR/AFAR, log the cache state, then call the C handler. */
	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
	membar		#Sync

	/* Fetch and clear AFSR/AFAR */
	ldxa		[%g0] ASI_AFSR, %g4	! %g4 = AFSR
	ldxa		[%g0] ASI_AFAR, %g5	! %g5 = AFAR
	stxa		%g4, [%g0] ASI_AFSR	! Writing AFSR back clears its bits
	membar		#Sync

	CHEETAH_LOG_ERROR			! Capture D/I/E-cache state (uses %g4/%g5)

	rdpr		%pil, %g2
	wrpr		%g0, 15, %pil
	ba,pt		%xcc, etrap_irq
	 rd		%pc, %g7		! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): %l4/%l5 presumably hold AFSR/AFAR
	mov		%l5, %o2		!   saved across etrap_irq from %g4/%g5 -- confirm in etrap
	call		cheetah_deferred_handler
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,a,pt		%xcc, rtrap_clr_l6
1424
	.globl		__do_privact
__do_privact:
	/* Privileged-action trap: clear the D-MMU SFSR fault-valid bit,
	 * build a trap frame, then call do_privact(regs) and return via
	 * rtrap. */
	mov		TLB_SFSR, %g3
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	or		%g7, %lo(109b), %g7	! Delay slot: %g7 = pc for etrap return
	call		do_privact
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1437
	.globl		do_mna
do_mna:
	/* Memory-address-not-aligned trap.  If it happened at TL > 1 it
	 * is a window spill/fill fault, handled by winfix_mna; otherwise
	 * build a trap frame and call mem_address_unaligned(). */
	rdpr		%tl, %g3
	cmp		%g3, 1

	/* Setup %g4/%g5 now as they are used in the
	 * winfixup code.
	 */
	mov		TLB_SFSR, %g3
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ldxa		[%g3] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	bgu,pn		%icc, winfix_mna	! TL > 1: window fixup path
	 rdpr		%tpc, %g3		! Delay slot: %g3 = trapping pc

1:	sethi		%hi(109f), %g7
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7	! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): presumably SFAR/SFSR saved
	mov		%l5, %o2		!   by etrap from %g4/%g5 -- confirm in etrap
	call		mem_address_unaligned
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1464
	.globl		do_lddfmna
do_lddfmna:
	/* Unaligned FP double load: read SFSR into %g5 and SFAR into %g4,
	 * clear the fault-valid bit, then handle it in C via
	 * handle_lddfmna(). */
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7	! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): presumably SFAR/SFSR saved
	mov		%l5, %o2		!   by etrap from %g4/%g5 -- confirm in etrap
	call		handle_lddfmna
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1482
	.globl		do_stdfmna
do_stdfmna:
	/* Unaligned FP double store: identical shape to do_lddfmna above,
	 * but dispatches to handle_stdfmna(). */
	sethi		%hi(109f), %g7
	mov		TLB_SFSR, %g4
	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
	membar		#Sync
	mov		DMMU_SFAR, %g4
	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
	ba,pt		%xcc, etrap
109:	 or		%g7, %lo(109b), %g7	! Delay slot: %g7 = pc for etrap return
	mov		%l4, %o1		! NOTE(review): presumably SFAR/SFSR saved
	mov		%l5, %o2		!   by etrap from %g4/%g5 -- confirm in etrap
	call		handle_stdfmna
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 clr		%l6
1500
	.globl	breakpoint_trap
breakpoint_trap:
	/* Breakpoint trap: a trap frame already exists here; just call
	 * sparc_breakpoint(regs) and return through rtrap. */
	call		sparc_breakpoint
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
	ba,pt		%xcc, rtrap
	 nop
1507
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
    defined(CONFIG_SOLARIS_EMUL_MODULE)
	/* SunOS uses syscall zero as the 'indirect syscall' it looks
	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
	 * This is complete brain damage.
	 */
	.globl	sunos_indir
sunos_indir:
	srl		%o0, 0, %o0		! Zero-extend 32-bit syscall number
	mov		%o7, %l4		! Remember our caller's return address
	cmp		%o0, NR_SYSCALLS
	blu,a,pt	%icc, 1f		! In range: annulled delay slot taken
	 sll		%o0, 0x2, %o0		! Table offset = nr * 4
	sethi		%hi(sunos_nosys), %l6	! Out of range -> sunos_nosys
	b,pt		%xcc, 2f
	 or		%l6, %lo(sunos_nosys), %l6
1:	sethi		%hi(sunos_sys_table), %l7
	or		%l7, %lo(sunos_sys_table), %l7
	lduw		[%l7 + %o0], %l6	! Load handler from 32-bit entry table
2:	mov		%o1, %o0		! Shift the args down one slot
	mov		%o2, %o1
	mov		%o3, %o2
	mov		%o4, %o3
	mov		%o5, %o4
	call		%l6			! Tail-call the real handler...
	 mov		%l4, %o7		! ...making it return to our caller

	/* sunos_getpid: %o0 of sys_getppid is stored into the user-visible
	 * %o1 slot and sys_getpid's result into %o0 -- apparently SunOS
	 * getpid() returns (pid, ppid) like getuid/getgid below.
	 * TODO(review): confirm against the SunOS ABI. */
	.globl	sunos_getpid
sunos_getpid:
	call	sys_getppid
	 nop
	call	sys_getpid
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]	! Delay slot: ppid -> user %o1
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]	! pid -> user %o0
	
	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
	.globl	sunos_getuid
sunos_getuid:
	call	sys32_geteuid16
	 nop
	call	sys32_getuid16
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]	! Delay slot: euid -> user %o1
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]	! uid -> user %o0

	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
	.globl	sunos_getgid
sunos_getgid:
	call	sys32_getegid16
	 nop
	call	sys32_getgid16
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]	! Delay slot: egid -> user %o1
	b,pt	%xcc, ret_sys_call
	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]	! gid -> user %o0
#endif
1564
	/* SunOS's execv() call only specifies the argv argument, the
	 * environment settings are the same as the calling processes.
	 */
	.globl	sunos_execv, sys_execve, sys32_execve
sys_execve:
	/* 64-bit execve: dispatch to sparc_execve(regs). */
	sethi		%hi(sparc_execve), %g1
	ba,pt		%xcc, execve_merge
	 or		%g1, %lo(sparc_execve), %g1
sunos_execv:
	/* SunOS execv: zero the envp slot (user %o2), then fall through
	 * to the 32-bit execve path. */
	stx		%g0, [%sp + PTREGS_OFF + PT_V9_I2]
sys32_execve:
	sethi		%hi(sparc32_execve), %g1
	or		%g1, %lo(sparc32_execve), %g1
execve_merge:
	/* Spill all register windows before the exec replaces the image,
	 * then tail-jump to the chosen C implementation with regs as arg0. */
	flushw
	jmpl		%g1, %g0
	 add		%sp, PTREGS_OFF, %o0	! Delay slot: arg0 = pt_regs pointer
1582
	.globl	sys_pipe, sys_sigpause, sys_nis_syscall
	.globl	sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
	.globl	sys_rt_sigreturn
	.globl	sys32_sigreturn, sys32_rt_sigreturn
	.globl	sys32_execve, sys_ptrace
	.globl	sys_sigaltstack, sys32_sigaltstack
	.globl	sys32_sigstack
	.align	32
	/* Thin dispatch stubs: each tail-branches to the C implementation,
	 * passing the pt_regs pointer (or the frame pointer for the
	 * sigaltstack family) in the delay slot. */
sys_pipe:	ba,pt		%xcc, sparc_pipe
		 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
sys_nis_syscall:ba,pt		%xcc, c_sys_nis_syscall
		 add		%sp, PTREGS_OFF, %o0	! arg0 = pt_regs
sys_memory_ordering:
		ba,pt		%xcc, sparc_memory_ordering
		 add		%sp, PTREGS_OFF, %o1	! arg1 = pt_regs
sys_sigaltstack:ba,pt		%xcc, do_sigaltstack
		 add		%i6, STACK_BIAS, %o2	! arg2 = user stack pointer
sys32_sigstack:	ba,pt		%xcc, do_sys32_sigstack
		 mov		%i6, %o2		! arg2 = user sp (32-bit, no bias)
sys32_sigaltstack:
		ba,pt		%xcc, do_sys32_sigaltstack
		 mov		%i6, %o2		! arg2 = user sp (32-bit, no bias)
1605
		.align		32
	/* Signal/ptrace syscall wrappers.  Each one rewrites its call
	 * return address with "add %o7, 1f-.-4, %o7" so the C helper
	 * returns directly to the shared trace tail at label 1 below,
	 * which re-checks the ptrace flag before going through rtrap. */
sys_sigsuspend:	add		%sp, PTREGS_OFF, %o0
		call		do_sigsuspend
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
		add		%sp, PTREGS_OFF, %o2
		call		do_rt_sigsuspend
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
		srl		%o0, 0, %o0		! Zero-extend 32-bit arg
		add		%sp, PTREGS_OFF, %o2
		call		do_rt_sigsuspend32
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		/* NOTE: %o0 has a correct value already */
sys_sigpause:	add		%sp, PTREGS_OFF, %o1
		call		do_sigpause
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys32_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_sigreturn32
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys_rt_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_rt_sigreturn
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys32_rt_sigreturn:
		add		%sp, PTREGS_OFF, %o0
		call		do_rt_sigreturn32
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
sys_ptrace:	add		%sp, PTREGS_OFF, %o0
		call		do_ptrace
		 add		%o7, 1f-.-4, %o7	! Make callee return to 1f
		nop
		.align		32
	/* Common tail: if the task is being syscall-traced (ptrace flag
	 * 0x02), notify the tracer before returning to the user. */
1:		ldx		[%curptr + AOFF_task_ptrace], %l5
		andcc		%l5, 0x02, %g0
		be,pt		%icc, rtrap
		 clr		%l6
		call		syscall_trace
		 nop

		ba,pt		%xcc, rtrap
		 clr		%l6
1655
	/* This is how fork() was meant to be done, 8 instruction entry.
	 *
	 * I questioned the following code briefly, let me clear things
	 * up so you must not reason on it like I did.
	 *
	 * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
	 * need it here because the only piece of window state we copy to
	 * the child is the CWP register.  Even if the parent sleeps,
	 * we are safe because we stuck it into pt_regs of the parent
	 * so it will not change.
	 *
	 * XXX This raises the question, whether we can do the same on
	 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
	 * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
	 * XXX fork_kwim in UREG_G1 (global registers are considered
	 * XXX volatile across a system call in the sparc ABI I think
	 * XXX if it isn't we can use regs->y instead, anyone who depends
	 * XXX upon the Y register being preserved across a fork deserves
	 * XXX to lose).
	 *
	 * In fact we should take advantage of that fact for other things
	 * during system calls...
	 */
	.globl	sys_fork, sys_vfork, sys_clone, sparc_exit
	.globl	ret_from_syscall
	.align	32
sys_vfork:	/* Under Linux, vfork and fork are just special cases of clone. */
		/* 0x4000 | 0x0100: clone flags (presumably CLONE_VFORK |
		 * CLONE_VM -- TODO confirm against <linux/sched.h>). */
		sethi		%hi(0x4000 | 0x0100 | SIGCHLD), %o0
		or		%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
		ba,pt		%xcc, sys_clone
sys_fork:	 clr		%o1			! No child stack given
		mov		SIGCHLD, %o0		! fork == clone(SIGCHLD, 0)
sys_clone:	flushw					! Spill windows before duplicating
		movrz		%o1, %fp, %o1		! Child stack defaults to parent's %fp
		mov		0, %o3
		ba,pt		%xcc, do_fork
		 add		%sp, PTREGS_OFF, %o2	! arg2 = pt_regs pointer
ret_from_syscall:
		/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
		 * %o7 for us.  Check performance counter stuff too.
		 */
		andn		%o7, SPARC_FLAG_NEWCHILD, %l0
		mov		%g5, %o0	/* 'prev' */
		call		schedule_tail
		 stb		%l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
		andcc		%l0, SPARC_FLAG_PERFCTR, %g0
		be,pt		%icc, 1f
		 nop
		ldx		[%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
		wr		%g0, %o7, %pcr

		/* Blackbird errata workaround.  See commentary in
		 * smp.c:smp_percpu_timer_interrupt() for more
		 * information.
		 */
		ba,pt		%xcc, 99f
		 nop
		.align		64
99:		wr		%g0, %g0, %pic
		rd		%pic, %g0

1:		b,pt		%xcc, ret_sys_call
		 ldx		[%sp + PTREGS_OFF + PT_V9_I0], %o0	! Child's syscall result
sparc_exit:	/* Give back this task's register windows before exiting:
		 * fold %otherwin into %cansave, zero %otherwin and the
		 * thread's w_saved count, then go do sys_exit.  Interrupts
		 * are disabled around the window-register surgery. */
		wrpr		%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
		rdpr		%otherwin, %g1
		rdpr		%cansave, %g3
		add		%g3, %g1, %g3
		wrpr		%g3, 0x0, %cansave
		wrpr		%g0, 0x0, %otherwin
		wrpr		%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
		ba,pt		%xcc, sys_exit
		 stb		%g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
1728
linux_sparc_ni_syscall:
	/* Out-of-range syscall number: route to sys_ni_syscall via the
	 * common dispatch point 4f in linux_sparc_syscall. */
	sethi		%hi(sys_ni_syscall), %l7
	b,pt		%xcc, 4f
	 or		%l7, %lo(sys_ni_syscall), %l7

linux_syscall_trace32:
	/* Traced 32-bit syscall: notify the tracer, then reload the args
	 * from the %i registers (zero-extending each 32-bit value) and
	 * resume at the dispatch point 2f. */
	call		syscall_trace
	 nop
	srl		%i0, 0, %o0
	mov		%i4, %o4
	srl		%i1, 0, %o1
	srl		%i2, 0, %o2
	b,pt		%xcc, 2f
	 srl		%i3, 0, %o3

linux_syscall_trace:
	/* Traced 64-bit syscall: as above, but no zero-extension. */
	call		syscall_trace
	 nop
	mov		%i0, %o0
	mov		%i1, %o1
	mov		%i2, %o2
	mov		%i3, %o3
	b,pt		%xcc, 2f
	 mov		%i4, %o4
1753
1754
	/* Linux 32-bit and SunOS system calls enter here... */
	.align	32
	.globl	linux_sparc_syscall32
linux_sparc_syscall32:
	/* Direct access to user regs, much faster. */
	/* %g1 = syscall number, %l7 = syscall table base (set by the trap
	 * entry).  Args come from %i0-%i5, each zero-extended to strip the
	 * upper 32 bits a 32-bit task may have left there. */
	cmp		%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
	 srl		%i0, 0, %o0				! IEU0
	sll		%g1, 2, %l4				! IEU0	Group
#ifdef SYSCALL_TRACING
	call		syscall_trace_entry
	 add		%sp, PTREGS_OFF, %o0
	srl		%i0, 0, %o0
#endif
	mov		%i4, %o4				! IEU1
	lduw		[%l7 + %l4], %l7			! Load
	srl		%i1, 0, %o1				! IEU0	Group
	ldx		[%curptr + AOFF_task_ptrace], %l0	! Load

	mov		%i5, %o5				! IEU1
	srl		%i2, 0, %o2				! IEU0	Group
	andcc		%l0, 0x02, %g0				! IEU0	Group
	bne,pn		%icc, linux_syscall_trace32		! CTI	Being traced?
	 mov		%i0, %l5				! IEU1	Save orig %o0 for restarts
	call		%l7					! CTI	Group brk forced
	 srl		%i3, 0, %o3				! IEU0
	ba,a,pt		%xcc, 3f
1782
	/* Linux native and SunOS system calls enter here... */
	.align	32
	.globl	linux_sparc_syscall, ret_sys_call
linux_sparc_syscall:
	/* Direct access to user regs, much faster. */
	/* 64-bit variant: same as linux_sparc_syscall32 but args are
	 * passed through unmodified (no zero-extension). */
	cmp		%g1, NR_SYSCALLS			! IEU1	Group
	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
	 mov		%i0, %o0				! IEU0
	sll		%g1, 2, %l4				! IEU0	Group
#ifdef SYSCALL_TRACING
	call		syscall_trace_entry
	 add		%sp, PTREGS_OFF, %o0
	mov		%i0, %o0
#endif
	mov		%i1, %o1				! IEU1
	lduw		[%l7 + %l4], %l7			! Load
4:	mov		%i2, %o2				! IEU0	Group
	ldx		[%curptr + AOFF_task_ptrace], %l0	! Load

	mov		%i3, %o3				! IEU1
	mov		%i4, %o4				! IEU0	Group
	andcc		%l0, 0x02, %g0				! IEU1	Group+1 bubble
	bne,pn		%icc, linux_syscall_trace		! CTI	Group  Being traced?
	 mov		%i0, %l5				! IEU0	Save orig %o0 for restarts
2:	call		%l7					! CTI	Group brk forced
	 mov		%i5, %o5				! IEU0
	nop

1810
3:	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]	! Store result for user %o0
ret_sys_call:
	/* Common syscall return.  Set or clear the (32- and 64-bit)
	 * carry condition codes in the saved TSTATE to signal
	 * success/failure to userland, advance tpc/tnpc past the trap
	 * instruction, and honor syscall tracing on the way out. */
#ifdef SYSCALL_TRACING
	mov		%o0, %o1
	call		syscall_trace_exit
	 add		%sp, PTREGS_OFF, %o0
	mov		%o1, %o0
#endif
	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
	ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
	sra		%o0, 0, %o0		! Sign-extend 32-bit result
	mov		%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
	cmp		%o0, -ENOIOCTLCMD	! Errors are -ENOIOCTLCMD..-1
	sllx		%g2, 32, %g2		! Carry bits live in TSTATE's upper half
	bgeu,pn		%xcc, 1f		! Looks like an error return
	 andcc		%l0, 0x02, %l6		! Delay slot: traced? (%l6 for later bne)
80:
	andn		%g3, %g2, %g3		/* System call success, clear Carry condition code. */
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, linux_syscall_trace2
	 add		%l1, 0x4, %l2				         ! npc = npc+4
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt		%xcc, rtrap_clr_l6
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]

1:
	/* Really a failure?  Check if force_successful_syscall_return()
	 * was invoked.
	 */
	ldx		[%curptr + AOFF_task_thread + AOFF_thread_flags], %l0
	andcc		%l0, SPARC_FLAG_SYS_SUCCESS, %g0
	be,pt		%icc, 1f
	 andcc		%l6, 0x02, %g0		! Delay slot: re-test traced flag
	andn		%l0, SPARC_FLAG_SYS_SUCCESS, %l0	! One-shot flag, clear it
	ba,pt		%xcc, 80b		! Treat as success after all
	 stx		%l0, [%curptr + AOFF_task_thread + AOFF_thread_flags]

	/* System call failure, set Carry condition code.
	 * Also, get abs(errno) to return to the process.
	 */
1:
	sub		%g0, %o0, %o0		! %o0 = -%o0 = abs(errno)
	or		%g3, %g2, %g3		! Set both carry bits in TSTATE
	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
	mov		1, %l6
	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
	bne,pn		%icc, linux_syscall_trace2
	 add		%l1, 0x4, %l2				         !npc = npc+4
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]

	b,pt		%xcc, rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
linux_syscall_trace2:
	/* Traced task: report syscall exit, then finish advancing the pc. */
	call		syscall_trace
	 nop
	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
	ba,pt		%xcc, rtrap
	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1869
	.align		32
	.globl		__flushw_user
__flushw_user:
	/* Flush the user's register windows: do one save per window in
	 * %otherwin (counting them in %g2), which pushes them out of the
	 * register file, then restore back the same number of times.
	 * Uses only %g1/%g2; returns via retl (leaf routine). */
	rdpr		%otherwin, %g1
	brz,pn		%g1, 2f			! No user windows, nothing to do
	 clr		%g2			! Delay slot: window counter = 0
1:	save		%sp, -128, %sp
	rdpr		%otherwin, %g1
	brnz,pt		%g1, 1b			! Keep going while user windows remain
	 add		%g2, 1, %g2		! Delay slot: count this window
1:	sub		%g2, 1, %g2
	brnz,pt		%g2, 1b			! Unwind the saves we did above
	 restore	%g0, %g0, %g0
2:	retl
	 nop
1885