/* $Id: rtrap.S,v 1.57.2.2 2002/03/03 10:31:56 davem Exp $
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define		RTRAP_PSTATE		(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define		RTRAP_PSTATE_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
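
		/* RTRAP_PSTATE is the normal return-path state: relaxed
		 * memory ordering (RMO), FPU enabled (PEF), privileged,
		 * interrupts enabled (IE).  The IRQOFF variant drops IE,
		 * and the AG variant additionally selects the alternate
		 * global registers for the final register reload.
		 */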

		/* Register %l6 keeps track of whether we are returning
		 * from a system call or not.  It is cleared if we call
		 * do_signal, and it must not be otherwise modified until
		 * we fully commit to returning to userspace.
		 */

		.text
		.align			32
__handle_softirq:
		call			do_softirq
		 nop
		ba,a,pt			%xcc, __handle_softirq_continue
		 nop
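		/* Reschedule with interrupts enabled: the delay slot of the
		 * call turns IRQs back on for schedule(), and they are
		 * turned off again before we redo the return-path checks.
		 */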
__handle_preemption:
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		ba,pt			%xcc, __handle_preemption_continue
		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
		call			fault_in_user_windows
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		/* Redo sched+sig checks */
		ldx			[%g6 + AOFF_task_need_resched], %l0
		brz,pt			%l0, 1f
		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
1:		lduw			[%g6 + AOFF_task_sigpending], %l0
		brz,pt			%l0, __handle_user_windows_continue
		 nop
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3

		add			%sp, PTREGS_OFF, %o1
		call			do_signal
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4

		ba,pt			%xcc, __handle_user_windows_continue
		 andn			%l1, %l4, %l1
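		/* The task has active performance counters: bring them up
		 * to date with IRQs enabled, then redo the user-window,
		 * reschedule and signal checks that the enabled-IRQ window
		 * may have invalidated.
		 */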
__handle_perfctrs:
		call			update_perfctrs
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		ldub			[%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
		brz,pt			%o2, 1f
		 nop

		/* Redo userwin+sched+sig checks */
		call			fault_in_user_windows
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
1:		ldx			[%g6 + AOFF_task_need_resched], %l0
		brz,pt			%l0, 1f
		 nop
		call			schedule
		 wrpr			%g0, RTRAP_PSTATE, %pstate

		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
1:		lduw			[%g6 + AOFF_task_sigpending], %l0
		brz,pt			%l0, __handle_perfctrs_continue
		 sethi			%hi(TSTATE_PEF), %o0
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3
		add			%sp, PTREGS_OFF, %o1

		call			do_signal
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6
		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		andn			%l1, %l4, %l1

		ba,pt			%xcc, __handle_perfctrs_continue
		 sethi			%hi(TSTATE_PEF), %o0
__handle_userfpu:
		rd			%fprs, %l5
		andcc			%l5, FPRS_FEF, %g0
		sethi			%hi(TSTATE_PEF), %o0
		be,a,pn			%icc, __handle_userfpu_continue
		 andn			%l1, %o0, %l1
		ba,a,pt			%xcc, __handle_userfpu_continue

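		/* Deliver pending signals.  The arguments appear to be, in
		 * order: a NULL oldset, the pt_regs area, the original %i0
		 * in %l5, and the syscall-return flag in %l6 (presumably
		 * for syscall restart handling); %l6 is cleared afterwards
		 * per the rule in the comment at the top of this file.
		 */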
__handle_signal:
		clr			%o0
		mov			%l5, %o2
		mov			%l6, %o3
		add			%sp, PTREGS_OFF, %o1
		call			do_signal
		 wrpr			%g0, RTRAP_PSTATE, %pstate
		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		clr			%l6

		/* Signal delivery can modify pt_regs tstate, so we must
		 * reload it.
		 */
		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		sethi			%hi(0xf << 20), %l4
		and			%l1, %l4, %l4
		ba,pt			%xcc, __handle_signal_continue
		 andn			%l1, %l4, %l1

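		/* Main entry points.  rtrap_clr_l6 enters with %l6 cleared,
		 * i.e. not a system-call return.  The shift count in
		 * irqsz_patchme is evidently patched at boot (hence the
		 * name) so the cpu number scales to the right byte offset
		 * into the irq_stat[] array.
		 */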
		.align			64
		.globl			rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_clr_l6:	clr			%l6
rtrap:		lduw			[%g6 + AOFF_task_processor], %l0
		sethi			%hi(irq_stat), %l2	! &softirq_active
		or			%l2, %lo(irq_stat), %l2	! &softirq_active
irqsz_patchme:	sllx			%l0, 0, %l0
		lduw			[%l2 + %l0], %l1	! softirq_pending
		cmp			%l1, 0

		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
		bne,pn			%icc, __handle_softirq
		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

__handle_softirq_continue:
rtrap_xcall:
		sethi			%hi(0xf << 20), %l4
		andcc			%l1, TSTATE_PRIV, %l3
		and			%l1, %l4, %l4
		bne,pn			%icc, to_kernel
		 andn			%l1, %l4, %l1

		/* We must hold IRQs off and atomically test schedule+signal
		 * state, then hold them off all the way back to userspace.
		 * If we are returning to kernel, none of this matters.
		 *
		 * If we did not, there would be a window after the tests in
		 * which a signal or reschedule event could arrive yet go
		 * unprocessed, because we would still be in kernel mode; it
		 * would not be handled until the next local IRQ.
		 *
		 * This also means that if we have to deal with performance
		 * counters or user windows, we have to redo all of these
		 * sched+signal checks with IRQs disabled.
		 */
to_user:	wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
		wrpr			0, %pil
__handle_preemption_continue:
		ldx			[%g6 + AOFF_task_need_resched], %l0
		brnz,pn			%l0, __handle_preemption
		 lduw			[%g6 + AOFF_task_sigpending], %l0
		brnz,pn			%l0, __handle_signal
		 nop
__handle_signal_continue:
		ldub			[%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
		brnz,pn			%o2, __handle_user_windows
		 nop
__handle_user_windows_continue:
		ldub			[%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
		andcc			%l5, SPARC_FLAG_PERFCTR, %g0
		sethi			%hi(TSTATE_PEF), %o0
		bne,pn			%xcc, __handle_perfctrs
__handle_perfctrs_continue:
		 andcc			%l1, %o0, %g0

		/* This fpdepth clear is necessary for non-syscall rtraps only */
		bne,pn			%xcc, __handle_userfpu
		 stb			%g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
__handle_userfpu_continue:

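		/* Point of no return: reload every user register from the
		 * pt_regs save area, rebuild the privileged trap state, and
		 * use retry to return to the %tpc/%tnpc loaded below.
		 */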
rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
		ldx			[%sp + PTREGS_OFF + PT_V9_G2], %g2

		ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
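		/* Switch to the alternate globals: the user values just
		 * loaded into the normal %g registers must be preserved
		 * from here until the retry.
		 */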
		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1

		ldx			[%sp + PTREGS_OFF + PT_V9_I2], %i2
		ldx			[%sp + PTREGS_OFF + PT_V9_I3], %i3
		ldx			[%sp + PTREGS_OFF + PT_V9_I4], %i4
		ldx			[%sp + PTREGS_OFF + PT_V9_I5], %i5
		ldx			[%sp + PTREGS_OFF + PT_V9_I6], %i6
		ldx			[%sp + PTREGS_OFF + PT_V9_I7], %i7
		ldx			[%sp + PTREGS_OFF + PT_V9_TPC], %l2
		ldx			[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

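		/* Rebuild the trap state for the retry: %y, the saved %pil
		 * (stashed in bits 23:20 of the saved tstate and extracted
		 * into %l4 earlier), trap level 1, then tstate/tpc/tnpc.
		 */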
		ld			[%sp + PTREGS_OFF + PT_V9_Y], %o3
		wr			%o3, %g0, %y
		srl			%l4, 20, %l4
		wrpr			%l4, 0x0, %pil
		wrpr			%g0, 0x1, %tl
		wrpr			%l1, %g0, %tstate
		wrpr			%l2, %g0, %tpc
		wrpr			%o2, %g0, %tnpc

		brnz,pn			%l3, kern_rtt
		 mov			PRIMARY_CONTEXT, %l7
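		/* Returning to userspace: copy the secondary (user) MMU
		 * context into the primary context register.  %l7 + %l7
		 * addresses SECONDARY_CONTEXT, since PRIMARY_CONTEXT is 0x08
		 * and SECONDARY_CONTEXT is 0x10.
		 */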
		ldxa			[%l7 + %l7] ASI_DMMU, %l0
		stxa			%l0, [%l7] ASI_DMMU
		flush			%g6
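		/* Restore the user's register-window accounting: windows
		 * saved at trap entry are counted in %otherwin, so move
		 * that count into %canrestore, and shift the "other" field
		 * of %wstate (bits 5:3) down into the "normal" field
		 * (bits 2:0).
		 */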
		rdpr			%wstate, %l1
		rdpr			%otherwin, %l2
		srl			%l1, 3, %l1

		wrpr			%l2, %g0, %canrestore
		wrpr			%l1, %g0, %wstate
		wrpr			%g0, %g0, %otherwin
		restore
		rdpr			%canrestore, %g1
		wrpr			%g1, 0x0, %cleanwin
		retry
		nop

kern_rtt:	restore
		retry
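		/* Return to kernel with in-kernel FPU state pending:
		 * thread.fpdepth appears to count nested FPU saves in steps
		 * of two (note the sub by 2), so fpdepth >> 1 indexes the
		 * per-depth fpsaved[], gsr[] and xfsr[] arrays.  Reload
		 * whichever register halves (FPRS_DL lower, FPRS_DU upper)
		 * the saved fprs says were live.
		 */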
to_kernel:	ldub			[%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
		brz,pt			%l5, rt_continue
		 srl			%l5, 1, %o0
		add			%g6, AOFF_task_thread + AOFF_thread_fpsaved, %l6
		ldub			[%l6 + %o0], %l2
		sub			%l5, 2, %l5

		add			%g6, AOFF_task_thread + AOFF_thread_gsr, %o1
		andcc			%l2, (FPRS_FEF|FPRS_DU), %g0
		be,pt			%icc, 2f
		 and			%l2, FPRS_DL, %l6
		andcc			%l2, FPRS_FEF, %g0
		be,pn			%icc, 5f
		 sll			%o0, 3, %o5
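		/* %o5 = depth index scaled by 8: the byte offset of this
		 * depth's slot in the saved GSR and FSR arrays.
		 */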
		rd			%fprs, %g5

		wr			%g5, FPRS_FEF, %fprs
		ldx			[%o1 + %o5], %g5
		add			%g6, AOFF_task_thread + AOFF_thread_xfsr, %o1
		membar			#StoreLoad | #LoadLoad
		sll			%o0, 8, %o2
		add			%g6, AOFF_task_fpregs, %o3
		brz,pn			%l6, 1f
		 add			%g6, AOFF_task_fpregs+0x40, %o4

		ldda			[%o3 + %o2] ASI_BLK_P, %f0
		ldda			[%o4 + %o2] ASI_BLK_P, %f16
1:		andcc			%l2, FPRS_DU, %g0
		be,pn			%icc, 1f
		 wr			%g5, 0, %gsr
		add			%o2, 0x80, %o2
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48

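		/* Block loads via ASI_BLK_P complete asynchronously; the
		 * membar #Sync is required before the loaded registers, or
		 * the FSR loaded below, may be used.
		 */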
1:		membar			#Sync
		ldx			[%o1 + %o5], %fsr
2:		stb			%l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
		ba,pt			%xcc, rt_continue
		 nop
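		/* Only the upper half was live (FPRS_FEF clear, FPRS_DU set
		 * in the saved fprs): temporarily enable the FPU to block-
		 * load %f32-%f62, then leave %fprs with just FPRS_DU set.
		 */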
5:		wr			%g0, FPRS_FEF, %fprs
		membar			#StoreLoad | #LoadLoad
		sll			%o0, 8, %o2

		add			%g6, AOFF_task_fpregs+0x80, %o3
		add			%g6, AOFF_task_fpregs+0xc0, %o4
		ldda			[%o3 + %o2] ASI_BLK_P, %f32
		ldda			[%o4 + %o2] ASI_BLK_P, %f48
		membar			#Sync
		wr			%g0, FPRS_DU, %fprs
		ba,pt			%xcc, rt_continue
		 stb			%l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]