// SPDX-License-Identifier: GPL-2.0
/*
 * In-kernel vector facility support functions
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>

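/*
 * asm/vx-insn.h provides assembler macros (VLM, VSTM, ...) that emit
 * the vector instructions used in the inline assemblies below, so that
 * no assembler support for the vector mnemonics is required.
 */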
asm(".include \"asm/vx-insn.h\"\n");

void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the save to the FPU/vector registers already
	 * in use by the previous context
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Save floating point control */
		asm volatile("stfpc %0" : "=Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
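		/*
		 * Without the vector facility only the 16 floating-point
		 * registers exist (they occupy the leftmost 64 bits of
		 * V0..V15); KERNEL_VXR_V0V7 doubles as the request to
		 * save them, cf. KERNEL_FPR in <asm/fpu/api.h>.
		 */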
		if (flags & KERNEL_VXR_V0V7) {
			/* Save floating-point registers */
			asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
			asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
			asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
			asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
			asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
			asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
			asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
			asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
			asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
			asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
			asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
			asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
			asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
			asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
			asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
			asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
		}
		return;
	}

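	/*
	 * The immediate operands below correspond to the flag bits from
	 * <asm/fpu/api.h>: KERNEL_VXR_V0V7=2, KERNEL_VXR_V8V15=4,
	 * KERNEL_VXR_V16V23=8 and KERNEL_VXR_V24V31=16, hence
	 * KERNEL_VXR=30, KERNEL_VXR_LOW=6, KERNEL_VXR_MID=12 and
	 * KERNEL_VXR_HIGH=24.  tmll sets CC0 if none and CC3 if all of
	 * the tested bits are set (jz/jo branch on these); "brc 2"
	 * branches on CC2, i.e. only the higher of the two tested bits
	 * is set.
	 */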
	/* Test and save vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be saved and, if so,
		 * test if all registers can be saved at once.
		 */
		"	la	1,%[vxrs]\n"	/* load save area */
		"	tmll	%[m],30\n"	/* KERNEL_VXR */
		"	jz	7f\n"		/* no work -> done */
		"	jo	5f\n"		/* -> save V0..V31 */
		/*
		 * Test for special case KERNEL_FPU_MID only. In this
		 * case a vstm V8..V23 is the best instruction
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> save V8..V23 */
		"	VSTM	8,23,128,1\n"	/* vstm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and save the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> save V0..V15 */
		"	brc	2,1f\n"		/* 10 -> save V8..V15 */
		"	VSTM	0,7,0,1\n"	/* vstm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VSTM	8,15,128,1\n"	/* vstm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		/* Test and save the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> save V16..V31 */
		"	brc	2,4f\n"		/* 10 -> save V24..V31 */
		"	VSTM	16,23,256,1\n"	/* vstm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VSTM	24,31,384,1\n"	/* vstm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		"6:	VSTM	16,31,256,1\n"	/* vstm %v16,%v31,256(%r1) */
		"7:"
		: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
		: [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the restore to the FPU/vector registers of the
	 * previous context that have been overwritten by the
	 * current context
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Restore floating-point controls */
		asm volatile("lfpc %0" : : "Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Restore floating-point registers */
			asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
			asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
			asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
			asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
			asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
			asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
			asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
			asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
			asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
			asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
			asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
			asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
			asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
			asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
			asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
			asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
		}
		return;
	}

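	/*
	 * The label and branch structure mirrors the save sequence in
	 * __kernel_fpu_begin() above; see the flag bit summary there.
	 */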
	/* Test and restore (load) vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be loaded and, if so,
		 * test if all registers can be loaded at once.
		 */
		"	la	1,%[vxrs]\n"	/* load restore area */
		"	tmll	%[m],30\n"	/* KERNEL_VXR */
		"	jz	7f\n"		/* no work -> done */
		"	jo	5f\n"		/* -> restore V0..V31 */
		/*
		 * Test for special case KERNEL_FPU_MID only. In this
		 * case a vlm V8..V23 is the best instruction
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> restore V8..V23 */
		"	VLM	8,23,128,1\n"	/* vlm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and restore the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> restore V0..V15 */
		"	brc	2,1f\n"		/* 10 -> restore V8..V15 */
		"	VLM	0,7,0,1\n"	/* vlm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VLM	8,15,128,1\n"	/* vlm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		/* Test and restore the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> restore V16..V31 */
		"	brc	2,4f\n"		/* 10 -> restore V24..V31 */
		"	VLM	16,23,256,1\n"	/* vlm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VLM	24,31,384,1\n"	/* vlm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		"6:	VLM	16,31,256,1\n"	/* vlm %v16,%v31,256(%r1) */
		"7:"
		: /* the restore only reads the save area */
		: [vxrs] "Q" (*(struct vx_array *) &state->vxrs),
		  [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_end);

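/*
 * Illustrative sketch (not taken from this file): a hypothetical
 * in-kernel user of vector registers V0..V15 would bracket its code
 * with the kernel_fpu_begin()/kernel_fpu_end() wrappers from
 * <asm/fpu/api.h>, which add the CIF_FPU and preemption handling
 * around the low-level functions above:
 *
 *	struct kernel_fpu vxstate;
 *
 *	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
 *	// ... use %v0..%v15 in inline assembly ...
 *	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);
 */

/*
 * Load the current task's FPU/vector register contents from
 * current->thread.fpu back into the hardware registers and clear
 * CIF_FPU: the hardware registers hold the task's current state
 * again.  Must be called with interrupts disabled; load_fpu_regs()
 * below is the irq-safe wrapper.
 */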
void __load_fpu_regs(void)
{
	struct fpu *state = &current->thread.fpu;
	unsigned long *regs = current->thread.fpu.regs;

	asm volatile("lfpc %0" : : "Q" (state->fpc));
	if (likely(MACHINE_HAS_VX)) {
		asm volatile("lgr	1,%0\n"
			     "VLM	0,15,0,1\n"
			     "VLM	16,31,256,1\n"
			     :
			     : "d" (regs)
			     : "1", "cc", "memory");
	} else {
		asm volatile("ld 0,%0" : : "Q" (regs[0]));
		asm volatile("ld 1,%0" : : "Q" (regs[1]));
		asm volatile("ld 2,%0" : : "Q" (regs[2]));
		asm volatile("ld 3,%0" : : "Q" (regs[3]));
		asm volatile("ld 4,%0" : : "Q" (regs[4]));
		asm volatile("ld 5,%0" : : "Q" (regs[5]));
		asm volatile("ld 6,%0" : : "Q" (regs[6]));
		asm volatile("ld 7,%0" : : "Q" (regs[7]));
		asm volatile("ld 8,%0" : : "Q" (regs[8]));
		asm volatile("ld 9,%0" : : "Q" (regs[9]));
		asm volatile("ld 10,%0" : : "Q" (regs[10]));
		asm volatile("ld 11,%0" : : "Q" (regs[11]));
		asm volatile("ld 12,%0" : : "Q" (regs[12]));
		asm volatile("ld 13,%0" : : "Q" (regs[13]));
		asm volatile("ld 14,%0" : : "Q" (regs[14]));
		asm volatile("ld 15,%0" : : "Q" (regs[15]));
	}
	clear_cpu_flag(CIF_FPU);
}
EXPORT_SYMBOL(__load_fpu_regs);

void load_fpu_regs(void)
{
	raw_local_irq_disable();
	__load_fpu_regs();
	raw_local_irq_enable();
}
EXPORT_SYMBOL(load_fpu_regs);

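/*
 * Save the current task's FPU/vector register contents into
 * current->thread.fpu and set CIF_FPU.  If CIF_FPU is already set,
 * the contents were saved before and must not be overwritten with
 * whatever the registers hold now.
 */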
void save_fpu_regs(void)
{
	unsigned long flags, *regs;
	struct fpu *state;

	local_irq_save(flags);

	if (test_cpu_flag(CIF_FPU))
		goto out;

	state = &current->thread.fpu;
	regs = current->thread.fpu.regs;

	asm volatile("stfpc %0" : "=Q" (state->fpc));
	if (likely(MACHINE_HAS_VX)) {
		asm volatile("lgr	1,%0\n"
			     "VSTM	0,15,0,1\n"
			     "VSTM	16,31,256,1\n"
			     :
			     : "d" (regs)
			     : "1", "cc", "memory");
	} else {
		asm volatile("std 0,%0" : "=Q" (regs[0]));
		asm volatile("std 1,%0" : "=Q" (regs[1]));
		asm volatile("std 2,%0" : "=Q" (regs[2]));
		asm volatile("std 3,%0" : "=Q" (regs[3]));
		asm volatile("std 4,%0" : "=Q" (regs[4]));
		asm volatile("std 5,%0" : "=Q" (regs[5]));
		asm volatile("std 6,%0" : "=Q" (regs[6]));
		asm volatile("std 7,%0" : "=Q" (regs[7]));
		asm volatile("std 8,%0" : "=Q" (regs[8]));
		asm volatile("std 9,%0" : "=Q" (regs[9]));
		asm volatile("std 10,%0" : "=Q" (regs[10]));
		asm volatile("std 11,%0" : "=Q" (regs[11]));
		asm volatile("std 12,%0" : "=Q" (regs[12]));
		asm volatile("std 13,%0" : "=Q" (regs[13]));
		asm volatile("std 14,%0" : "=Q" (regs[14]));
		asm volatile("std 15,%0" : "=Q" (regs[15]));
	}
	set_cpu_flag(CIF_FPU);
out:
	local_irq_restore(flags);
}
EXPORT_SYMBOL(save_fpu_regs);