/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

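/*
 * CPU feature checks for the save/restore paths.  static_cpu_has() is
 * patched via the alternatives mechanism at boot, so these tests cost
 * (almost) nothing on the hot paths that call them.
 */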
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

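/*
 * With xsaveopt, state components that are in their init state may not be
 * written back to the memory image, so the buffer can contain stale data.
 * __sanitize_i387_state() rewrites those components with their legal init
 * values so that users such as ptrace and signal handling see a consistent
 * image.
 */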
static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
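/*
 * Restore FPU/SSE state from @fx.  Returns 0 on success, or -1 if the
 * fxrstor faulted (e.g. on a malformed state image); the exception table
 * fixup converts the fault into the error return.
 */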
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

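/*
 * Save FXSAVE-format state straight into a user-space buffer (used for
 * signal frames).  Returns 0 on success, non-zero if the user buffer
 * could not be written.
 */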
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes that fxsave does not touch and that are
	 * reserved for software use.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
	asm volatile("rex64/fxsave %0"
		     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else  /* CONFIG_X86_32 */

/* Perform fxrstor if the CPU supports FXSR, otherwise fall back to frstor. */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */

/*
 * Must be called with preemption disabled.  Returns 1 if the FPU register
 * contents are still valid after the save (and need not be reloaded later),
 * 0 if they were clobbered by fnsave or modified by fnclex.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header tells us whether the FP state is in its
		 * init state; if so, no exceptions can be pending and the
		 * register contents are trivially intact.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

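/* Restore register state from memory, using xrstor when XSAVE is in use. */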
static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values.  The address operand is an arbitrary per-task variable
	   that should already be hot in the L1 cache. */
	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (tsk->thread.fpu.has_fpu));
	}

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need preemption
 * protection *and* they need to be properly paired with the
 * CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	percpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	percpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();
	__thread_set_has_fpu(tsk);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 *
 * A minimal usage sketch follows switch_fpu_finish() below.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, its state will still be saved
 * at the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

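/*
 * Returns true if @new's FPU register state is still live on @cpu, i.e.
 * @new is the recorded fpu_owner_task there and @cpu is where its state
 * was last loaded, in which case the memory-to-register restore can be
 * skipped.
 */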
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == percpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

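	/*
	 * Preload the incoming task's FPU state eagerly only if it has been
	 * using the FPU on consecutive timeslices (fpu_counter > 5);
	 * otherwise leave CR0.TS set and let the first FPU use fault the
	 * state in lazily via #NM.
	 */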
	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			__thread_fpu_end(new);
	}
}

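/*
 * Illustrative sketch (not used anywhere): roughly how a context-switch
 * path is expected to drive the two-stage API above.  The real caller is
 * __switch_to() in arch/x86/kernel/process_32.c / process_64.c; the
 * function name below is made up purely for illustration.
 */
static inline void example_fpu_switch(struct task_struct *prev,
				      struct task_struct *next, int cpu)
{
	fpu_switch_t fpu;

	/* Stage 1: in the old task's context, save its state and update CR0.TS. */
	fpu = switch_fpu_prepare(prev, next, cpu);

	/* ... the actual stack/register switch would happen here ... */

	/* Stage 2: restore the new task's register state if we chose to preload. */
	switch_fpu_finish(next, fpu);
}
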
/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

/*
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline void user_fpu_end(void)
{
	preempt_disable();
	__thread_fpu_end(current);
	preempt_enable();
}

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state accessors (handle both the fsave and fxsave layouts)
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

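/*
 * FPU state area management: the per-task state buffer is allocated
 * lazily from task_xstate_cachep (xstate_size bytes, expected to be
 * 16-byte aligned, as the WARN_ON below checks).
 */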
static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

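/*
 * Copy the complete xstate image; both @dst and @src must already have
 * their state buffers allocated.
 */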
static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif