1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999 by Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc.
11 *    written by Carsten Langgaard, carstenl@mips.com
12 */
13#include <linux/config.h>
14#include <asm/asm.h>
15#include <asm/cachectl.h>
16#include <asm/current.h>
17#include <asm/fpregdef.h>
18#include <asm/mipsregs.h>
19#include <asm/offset.h>
20#include <asm/page.h>
21#include <asm/pgtable-bits.h>
22#include <asm/processor.h>
23#include <asm/regdef.h>
24#include <asm/stackframe.h>
25
26#include <asm/asmmacro.h>
27
28#define PF_USEDFPU      0x00100000      /* task used FPU this quantum (SMP) */
29#define ST_OFF (KERNEL_STACK_SIZE - 32 - PT_SIZE + PT_STATUS)
30
31/*
32 * [jsun] FPU context is saved if and only if the process has used FPU in
33 * the current run (PF_USEDFPU).  In any case, the CU1 bit for user space
34 * STATUS register should be 0, so that a process *always* starts its
35 * userland with FPU disabled after each context switch.
36 *
37 * FPU will be enabled as soon as the process accesses FPU again, through
38 * do_cpu() trap.
39 */
40
/*
 * task_struct *resume(task_struct *prev, task_struct *next)
 */
44	.set	noreorder
45	.align	5
46	LEAF(resume)
47#ifndef CONFIG_CPU_HAS_LLSC
48	sw      zero, ll_bit
49#endif
50	mfc0	t1, CP0_STATUS
51	sw	t1, THREAD_STATUS(a0)
52	CPU_SAVE_NONSCRATCH(a0)
53	sw	ra, THREAD_REG31(a0)
54
55	/*
56	 * check if we need to save FPU registers
57	 */
58	lw	t0, TASK_FLAGS(a0)
59	li	t1, PF_USEDFPU
60	and	t2, t0, t1
61	beqz	t2, 1f
62	nor	t1, zero, t1
63
64	/*
65	 * clear PF_USEDFPU bit in task flags
66	 */
67	and	t0, t0, t1
68	sw	t0, TASK_FLAGS(a0)
69
70	/*
71	 * clear saved user stack CU1 bit
72	 */
73	lw	t0, ST_OFF(a0)
74	li	t1, ~ST0_CU1
75	and	t0, t0, t1
76	sw	t0, ST_OFF(a0)
77
78	FPU_SAVE_DOUBLE(a0, t0)			# clobbers t0
79
801:
81	/*
82	 * The order of restoring the registers takes care of the race
83	 * updating $28, $29 and kernelsp without disabling ints.
84	 */
85	move	$28, a1
86	CPU_RESTORE_NONSCRATCH($28)
87	addiu	t0, $28, KERNEL_STACK_SIZE-32
88#ifdef CONFIG_SMP
89	mfc0	a3, CP0_CONTEXT
90	la	t1, kernelsp
91	srl	a3, 23
92	sll	a3, 2
93	addu	t1, a3, t1
94	sw	t0, (t1)
95#else
96	sw	t0, kernelsp
97#endif
98	mfc0	t1, CP0_STATUS		/* Do we really need this? */
99	li	a3, 0xff00
100	and	t1, a3
101	lw	a2, THREAD_STATUS($28)
102	nor	a3, $0, a3
103	and	a2, a3
104	or	a2, t1
105	mtc0	a2, CP0_STATUS
106	jr	ra
107	 move	v0, a0
108	END(resume)
109
110/*
111 * Save a thread's fp context.
112 */
113LEAF(_save_fp)
114	FPU_SAVE_DOUBLE(a0, t1)			# clobbers t1
115	jr	ra
116	END(_save_fp)
117
118/*
119 * Restore a thread's fp context.
120 */
121LEAF(_restore_fp)
122	FPU_RESTORE_DOUBLE(a0, t1)		# clobbers t1
123	jr	ra
124	END(_restore_fp)
125
/*
 * Load the FPU with signaling NaNs.  The bit pattern we use has the
 * property that, whether interpreted as single or as double precision,
 * it represents a signaling NaN.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */
133
134#define FPU_DEFAULT  0x00000000
135
136LEAF(_init_fpu)
137	.set	mips3
138	mfc0	t0, CP0_STATUS
139	li	t1, ST0_CU1
140	or	t0, t1
141	mtc0	t0, CP0_STATUS
142	FPU_ENABLE_HAZARD
143
144	li	t1, FPU_DEFAULT
145	ctc1	t1, fcr31
146
147	li	t0, -1
148
149	dmtc1	t0, $f0
150	dmtc1	t0, $f2
151	dmtc1	t0, $f4
152	dmtc1	t0, $f6
153	dmtc1	t0, $f8
154	dmtc1	t0, $f10
155	dmtc1	t0, $f12
156	dmtc1	t0, $f14
157	dmtc1	t0, $f16
158	dmtc1	t0, $f18
159	dmtc1	t0, $f20
160	dmtc1	t0, $f22
161	dmtc1	t0, $f24
162	dmtc1	t0, $f26
163	dmtc1	t0, $f28
164	.set	noreorder
165	jr	ra
166	 dmtc1	t0, $f30
167	.set	reorder
168	END(_init_fpu)
169
170