/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>

#ifdef CONFIG_X86_64
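/* Per-CPU shadow of MSR_IA32_XFD, used to avoid redundant WRMSRs. */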
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
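
/*
 * Return the xstate feature permission bitmap for the current thread
 * group, either the host one or the guest (KVM) one. The group
 * leader's fpu holds the authoritative copy for the whole group.
 */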
static inline u64 xstate_get_group_perm(bool guest)
{
	struct fpu *fpu = &current->group_leader->thread.fpu;
	struct fpu_state_perm *perm;

	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	perm = guest ? &fpu->guest_perm : &fpu->perm;
	return READ_ONCE(perm->__state_perm);
}

static inline u64 xstate_get_host_group_perm(void)
{
	return xstate_get_group_perm(false);
}
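
/*
 * Target formats for copying xstate to a UABI buffer: legacy FP only,
 * FXSAVE format, or the full XSAVE format.
 */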
enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
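
/* Mask of supervisor xfeatures supported by the kernel on this system. */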
static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}
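
/*
 * Independent features are managed outside the regular task xstate
 * mechanism (e.g. LBR state via perf). LBR is only included when the
 * CPU has architectural LBRs.
 */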
static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif
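
/*
 * The instructions are emitted as raw opcode bytes so that they assemble
 * even with toolchains which do not know the mnemonics; on 64-bit the
 * REX.W prefix (0x48) selects the 64-bit forms (XSAVE64 etc.).
 */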
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVEC		".byte " REX_PREFIX "0x0f,0xc7,0x27"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * After this, @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEC because it additionally
 * supports supervisor states.
 *
 * Otherwise, if XSAVEC is enabled, it replaces XSAVEOPT because it
 * additionally supports the compacted storage format.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because it
 * supports the modified optimization which XSAVE does not.
 *
 * XSAVE is used as the fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. It is needed here as the
 * address of the instruction at which an exception might be raised.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_3(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVEC, X86_FEATURE_XSAVEC,		\
				   XSAVES, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
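/*
 * Keep MSR_IA32_XFD in sync with the current fpstate's XFD value. The
 * per-CPU shadow copy avoids the comparatively expensive WRMSR when the
 * value is already correct.
 */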
static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
	}
}

extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }

static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu)
{
	return -EPERM;
}
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses XSAVE, XSAVEOPT, XSAVEC or XSAVES depending on the CPU features
 * and command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD validation. */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * XSAVE itself always writes all requested xfeatures. Removing features
 * from the request bitmap reduces the features which are written.
 * Generate a mask of features which must be written to a sigframe. The
 * unset features can be optimized away and not written.
 *
 * This optimization is user-visible. Only use it for states where
 * uninitialized sigframe contents are tolerable, like dynamic features.
 *
 * Users of buffers produced with this optimization must check XSTATE_BV
 * to determine which features have been optimized out.
 */
static inline u64 xfeatures_need_sigframe_write(void)
{
	u64 xfeatures_to_write;

	/* In-use features must be written: */
	xfeatures_to_write = xfeatures_in_use();

	/* Also write all non-optimizable sigframe features: */
	xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
			      ~XFEATURE_MASK_SIGFRAME_INITOPT;

	return xfeatures_to_write;
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because the XRSTOR/XRSTORS which
 * eventually consumes the buffer might run in a different application.
 *
 * We don't use the compacted format for the xsave area, for backward
 * compatibility with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask;
	u32 hmask;
	int err;

	/* Optimize away writing unnecessary xfeatures: */
	if (fpu_state_size_dynamic())
		mask &= xfeatures_need_sigframe_write();

	lmask = mask;
	hmask = mask >> 32;
	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area; returns an error code
 * instead of raising an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif