/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
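
/*
 * Illustrative usage sketch (editorial assumption, not part of this
 * header): a caller initializing a zeroed XSAVE buffer for the compacted
 * format would set up the compaction mask before the first XRSTORS,
 * roughly:
 *
 *	memset(&fpstate->regs.xsave, 0, fpstate->size);
 *	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);
 */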

static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u64 xfeatures, u32 pkru_val,
				      enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

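/*
 * Note (editorial, hedged): the "independent" features are saved and
 * restored by their own subsystems (e.g. perf for LBR) rather than via
 * the regular task XSAVE buffer. When arch LBR is not available, the LBR
 * bit is masked out so that IA32_XSS never enables LBR state management
 * on such CPUs.
 */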
static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVEC		".byte " REX_PREFIX "0x0f,0xc7,0x27"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * After this, @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
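
/*
 * Operand notes (editorial): the requested feature mask is passed in
 * EDX:EAX (the "a"/"d" constraints) and the XSAVE area pointer in
 * (%rdi)/(%edi) (the "D" constraint). On a fault, the
 * EX_TYPE_FAULT_MCE_SAFE fixup stores the trap number in EAX, which is
 * also the register backing the @err output.
 */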

/*
 * If XSAVES is enabled, it replaces XSAVEC because it supports supervisor
 * states in addition to XSAVEC.
 *
 * Otherwise, if XSAVEC is enabled, it replaces XSAVEOPT because it supports
 * the compacted storage format in addition to XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because XSAVEOPT
 * supports the modified optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction which might raise an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_3(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC, X86_FEATURE_XSAVEC,		\
				   XSAVES, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
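
/*
 * Note (editorial): with EX_TYPE_EFAULT_REG, a faulting instruction
 * results in the fixup writing -EFAULT into the register backing @err,
 * so @err ends up 0 on success and -EFAULT on a fault.
 */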

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
	}
}
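
/*
 * Note (editorial): writes to MSR_IA32_XFD are comparatively expensive,
 * so the per-CPU xfd_state mirror above is used to skip redundant WRMSRs
 * when the incoming task's XFD value matches what the CPU already has.
 */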

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses the best available variant (XSAVE, XSAVEOPT, XSAVEC or XSAVES)
 * depending on CPU features and command line options. The choice is
 * permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures. Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe. The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible. Only use it for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}
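
/*
 * Worked example (editorial; assumes AMX TILEDATA is part of
 * XFEATURE_MASK_SIGFRAME_INITOPT): if a task has never touched its tile
 * registers, xfeatures_in_use() has the TILEDATA bit clear, so the large
 * TILEDATA region is skipped in the sigframe and its XSTATE_BV bit stays
 * clear. A signal handler inspecting the frame must then treat that
 * state as being in its init configuration.
 */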

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because XRSTOR/XRSTORS might
 * track a different application's buffer.
 *
 * We don't use the compacted format either, for backward compatibility
 * with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}
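
/*
 * Note (editorial): stac()/clac() open and close a SMAP window around
 * the user space access. A non-zero return means the XSAVE faulted on
 * user memory; the sigframe setup code is expected to handle that, e.g.
 * by faulting in the pages and retrying.
 */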

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, returning an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}
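
/*
 * Illustrative usage sketch (editorial assumption, not a call site
 * defined in this header): callers that may be handed an untrusted or
 * stale buffer can fall back to a known-good state instead of taking a
 * fault:
 *
 *	if (os_xrstor_safe(fpstate, mask))
 *		... fall back to restoring the init fpstate ...
 */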

#endif