1/*
2 * r2300_switch.S: R2300 specific task switching code.
3 *
4 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
6 *
7 * Multi-cpu abstraction and macros for easier reading:
8 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
9 *
10 * Further modifications to make this work:
11 * Copyright (c) 1998-2000 Harald Koerfgen
12 */
13#include <linux/config.h>
14#include <asm/asm.h>
15#include <asm/cachectl.h>
16#include <asm/current.h>
17#include <asm/fpregdef.h>
18#include <asm/mipsregs.h>
19#include <asm/offset.h>
20#include <asm/page.h>
21#include <asm/pgtable-bits.h>
22#include <asm/processor.h>
23#include <asm/regdef.h>
24#include <asm/stackframe.h>
25
26#include <asm/asmmacro.h>
27
28	.set	mips1
29	.align	5
30
31#define PF_USEDFPU      0x00100000      /* task used FPU this quantum (SMP) */
32#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS)
33
34/*
35 * [jsun] FPU context is saved if and only if the process has used FPU in
36 * the current run (PF_USEDFPU).  In any case, the CU1 bit for user space
37 * STATUS register should be 0, so that a process *always* starts its
38 * userland with FPU disabled after each context switch.
39 *
40 * FPU will be enabled as soon as the process accesses FPU again, through
41 * do_cpu() trap.
42 */
43
44/*
45 * task_struct *resume(task_struct *prev,
46 *                     task_struct *next)
47 */
/*
 * task_struct *resume(task_struct *prev, task_struct *next)
 *
 * a0 = prev (task giving up the CPU), a1 = next (task about to run).
 * Returns prev in v0 so the caller still holds it after the stack switch.
 */
LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
	sw      zero, ll_bit		# invalidate pending ll/sc emulation state
#endif
	mfc0	t1, CP0_STATUS
	sw	t1, THREAD_STATUS(a0)	# save prev's CP0 status word
	CPU_SAVE_NONSCRATCH(a0)		# save callee-saved regs into prev's thread struct
	sw	ra, THREAD_REG31(a0)	# prev will resume at our caller's return point

	/*
	 * Check if we need to save FPU registers: per the policy described
	 * at the top of this file, FP context is saved only if prev used
	 * the FPU during its current run (PF_USEDFPU set).
	 */
	lw	t0, TASK_FLAGS(a0)
	li	t1, PF_USEDFPU
	and	t2, t0, t1
	beqz	t2, 1f			# FPU untouched -> skip save entirely
	nor	t1, zero, t1		# t1 = ~PF_USEDFPU

	/*
	 * clear PF_USEDFPU bit in task flags
	 */
	and	t0, t0, t1
	sw	t0, TASK_FLAGS(a0)

	/*
	 * Clear the CU1 bit in the userland STATUS word saved in the
	 * pt_regs at the top of prev's kernel stack (ST_OFF, defined
	 * above), so prev restarts userland with the FPU disabled and
	 * re-enables it lazily via the do_cpu() trap.
	 */
	lw	t0, ST_OFF(a0)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	sw	t0, ST_OFF(a0)

	FPU_SAVE_SINGLE(a0, t0)			# clobbers t0

1:
	/*
	 * The order of restoring the registers takes care of the race
	 * updating $28, $29 and kernelsp without disabling ints.
	 */
	move	$28, a1			# $28 now points at next
	CPU_RESTORE_NONSCRATCH($28)	# restores $29 (sp) among the non-scratch regs
	addiu	t0, $28, KERNEL_STACK_SIZE-32
	sw	t0, kernelsp		# publish next's kernel stack top
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff00		# interrupt-mask bit field of STATUS
	and	t1, a3			# t1 = currently-live IM bits
	lw	a2, THREAD_STATUS($28)	# next's saved CP0 status
	nor	a3, $0, a3		# a3 = ~0xff00
	and	a2, a3			# drop next's stale IM bits ...
	or	a2, t1			# ... splice in the live ones
	mtc0	a2, CP0_STATUS
	.set	noreorder
	jr	ra
	 move	v0, a0			# delay slot: return prev
	.set	reorder
	END(resume)
104
105/*
106 * Save a thread's fp context.
107 */
LEAF(_save_fp)
	# a0 = task_struct of the thread whose FP context is stored;
	# FPU_SAVE_SINGLE (asmmacro.h) does the actual register stores.
	FPU_SAVE_SINGLE(a0, t1)			# clobbers t1
	jr	ra
	END(_save_fp)
112
113/*
114 * Restore a thread's fp context.
115 */
LEAF(_restore_fp)
	# a0 = task_struct of the thread whose FP context is reloaded;
	# FPU_RESTORE_SINGLE (asmmacro.h) does the actual register loads.
	FPU_RESTORE_SINGLE(a0, t1)		# clobbers t1
	jr	ra
	END(_restore_fp)
120
121/*
 * Load the FPU with signalling NaNs.  The bit pattern we use has the
 * property that, no matter whether it is interpreted as single or as
 * double precision, it represents a signalling NaN.
125 *
126 * We initialize fcr31 to rounding to nearest, no exceptions.
127 */
128
129#define FPU_DEFAULT  0x00000000
130
LEAF(_init_fpu)
	# Turn on coprocessor 1 so the FP register writes below are legal.
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS

	li	t1, FPU_DEFAULT			# round to nearest, no exceptions
	ctc1	t1, fcr31

	li	t0, -1				# all-ones pattern (signalling NaN,
						# per the comment above)

	mtc1	t0, $f0
	mtc1	t0, $f1
	mtc1	t0, $f2
	mtc1	t0, $f3
	mtc1	t0, $f4
	mtc1	t0, $f5
	mtc1	t0, $f6
	mtc1	t0, $f7
	mtc1	t0, $f8
	mtc1	t0, $f9
	mtc1	t0, $f10
	mtc1	t0, $f11
	mtc1	t0, $f12
	mtc1	t0, $f13
	mtc1	t0, $f14
	mtc1	t0, $f15
	mtc1	t0, $f16
	mtc1	t0, $f17
	mtc1	t0, $f18
	mtc1	t0, $f19
	mtc1	t0, $f20
	mtc1	t0, $f21
	mtc1	t0, $f22
	mtc1	t0, $f23
	mtc1	t0, $f24
	mtc1	t0, $f25
	mtc1	t0, $f26
	mtc1	t0, $f27
	mtc1	t0, $f28
	mtc1	t0, $f29
	mtc1	t0, $f30
	.set	noreorder
	jr	ra
	 mtc1	t0, $f31		# last write rides the branch delay slot
	.set	reorder
	END(_init_fpu)
178