/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * Signal context structure - contains all info to do with the state
 * before the signal handler was invoked.
 */
struct sigcontext {
	__u64 fault_address;
	/* AArch64 registers */
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
	/* 4K reserved for FP/SIMD state and future expansion */
	__u8 __reserved[4096] __attribute__((__aligned__(16)));
};

/*
 * Allocation of __reserved[]:
 * (Note: records do not necessarily occur in the order shown here.)
 *
 *	size		description
 *
 *	0x210		fpsimd_context
 *	 0x10		esr_context
 *	0x8a0		sve_context (vl <= 64) (optional)
 *	 0x20		extra_context (optional)
 *	 0x10		terminator (null _aarch64_ctx)
 *
 *	0x510		(reserved for future allocation)
 *
 * New records that can exceed this space need to be opt-in for userspace, so
 * that an expanded signal frame is not generated unexpectedly. The mechanism
 * for opting in will depend on the extension that generates each new record.
 * The above table documents the maximum set and sizes of records that can be
 * generated when userspace does not opt in for any such extension.
 */

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 *
 * Note that the values allocated for use as magic should be chosen to
 * be meaningful in ASCII to aid manual parsing; ZA doesn't follow this
 * convention due to oversight, but it should be observed for future additions.
 */
struct _aarch64_ctx {
	__u32 magic;
	__u32 size;
};

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	__u32 fpsr;
	__u32 fpcr;
	__uint128_t vregs[32];
};

/*
 * Note: similarly to all other integer fields, each V-register is stored in an
 * endianness-dependent format, with the byte at offset i from the start of the
 * in-memory representation of the register value containing
 *
 *    bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
 *    bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
 */

/* ESR_EL1 context */
#define ESR_MAGIC	0x45535201

struct esr_context {
	struct _aarch64_ctx head;
	__u64 esr;
};
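/*
 * Illustrative sketch (not part of the kernel UAPI): user code commonly
 * locates a record such as fpsimd_context or esr_context by walking the
 * _aarch64_ctx list in sigcontext.__reserved[] until the null terminator.
 * The helper below is an assumption for illustration only; it presumes a
 * libc whose mcontext_t mirrors struct sigcontext (as glibc's does on
 * arm64) and omits the bounds checking a robust parser would need.
 *
 *	#include <stddef.h>
 *	#include <signal.h>
 *
 *	static void *find_record(ucontext_t *uc, __u32 magic)
 *	{
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {
 *			if (head->magic == magic)
 *				return head;
 *			head = (struct _aarch64_ctx *)((__u8 *)head + head->size);
 *		}
 *
 *		return NULL;
 *	}
 *
 * For example, find_record(uc, ESR_MAGIC) returns a pointer to the
 * struct esr_context when the kernel included one, or NULL otherwise.
 */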
/*
 * extra_context: describes extra space in the signal frame for
 * additional structures that don't fit in sigcontext.__reserved[].
 *
 * Note:
 *
 * 1) fpsimd_context, esr_context and extra_context must be placed in
 * sigcontext.__reserved[] if present. They cannot be placed in the
 * extra space. Any other record can be placed either in the extra
 * space or in sigcontext.__reserved[], unless otherwise specified in
 * this file.
 *
 * 2) There must not be more than one extra_context.
 *
 * 3) If extra_context is present, it must be followed immediately in
 * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
 *
 * 4) The extra space to which datap points must start at the first
 * 16-byte aligned address immediately after the terminating null
 * _aarch64_ctx that follows the extra_context structure in
 * __reserved[]. The extra space may overrun the end of __reserved[],
 * as indicated by a sufficiently large value for the size field.
 *
 * 5) The extra space must itself be terminated with a null
 * _aarch64_ctx.
 */
#define EXTRA_MAGIC	0x45585401

struct extra_context {
	struct _aarch64_ctx head;
	__u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
	__u32 size; /* size in bytes of the extra space */
	__u32 __reserved[3];
};

#define SVE_MAGIC	0x53564501

struct sve_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 flags;
	__u16 __reserved[2];
};

#define SVE_SIG_FLAG_SM	0x1	/* Context describes streaming mode */

/* TPIDR2_EL0 context */
#define TPIDR2_MAGIC	0x54504902

struct tpidr2_context {
	struct _aarch64_ctx head;
	__u64 tpidr2;
};

#define ZA_MAGIC	0x54366345

struct za_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 __reserved[3];
};

#define ZT_MAGIC	0x5a544e01

struct zt_context {
	struct _aarch64_ctx head;
	__u16 nregs;
	__u16 __reserved[3];
};

#endif /* !__ASSEMBLY__ */

#include <asm/sve_context.h>

/*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
 *
 * See linux/Documentation/arch/arm64/sve.rst for a description of the VL/VQ
 * terminology.
 */
#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */

#define SVE_VQ_MIN		__SVE_VQ_MIN
#define SVE_VQ_MAX		__SVE_VQ_MAX

#define SVE_VL_MIN		__SVE_VL_MIN
#define SVE_VL_MAX		__SVE_VL_MAX

#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
#define SVE_NUM_PREGS		__SVE_NUM_PREGS

#define sve_vl_valid(vl)	__sve_vl_valid(vl)
#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
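/*
 * Illustrative sketch (not part of the kernel UAPI): given a pointer sve to
 * a struct sve_context written by the kernel, the vector length in
 * quadwords is obtained as below. The validity check is redundant for
 * kernel-written records but shown for completeness; sve is an assumed
 * name for illustration only.
 *
 *	unsigned int vq = 0;
 *
 *	if (sve_vl_valid(sve->vl))
 *		vq = sve_vq_from_vl(sve->vl);
 *
 * sve_vl_from_vq(vq) converts back, e.g. sve_vl_from_vq(4) == 64 bytes,
 * corresponding to a 512-bit vector length.
 */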
/*
 * If the SVE registers are currently live for the thread at signal delivery,
 * sve_context.head.size >=
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
 * and the register data may be accessed using the SVE_SIG_*() macros.
 *
 * If sve_context.head.size <
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
 * the SVE registers were not live for the thread and no register data
 * is included: in this case, the SVE_SIG_*() macros should not be
 * used except for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the sve_context block if it wants to
 * make the SVE registers live when they were previously non-live or
 * vice-versa. This may require the caller to allocate fresh
 * memory and/or move other context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * sve_context.vl must equal the thread's current vector length when
 * doing a sigreturn.
 *
 * On systems with support for SME the SVE register state may reflect either
 * streaming or non-streaming mode. In streaming mode the streaming mode
 * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
 * the flags field. It is permitted to enter or leave streaming mode in a
 * signal return; applications should take care to ensure that any difference
 * in vector length between the two modes is handled, including any resizing
 * and movement of context blocks.
 *
 * Note: for all these macros, the "vq" argument denotes the vector length
 * in quadwords (i.e., units of 128 bits).
 *
 * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
 * result is valid if and only if sve_vl_valid(vl) is true. This is
 * guaranteed for a struct sve_context written by the kernel.
 *
 * Additional macros describe the contents and layout of the payload.
 * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
 * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
 * size in bytes:
 *
 *	x	type				description
 *	-	----				-----------
 *	REGS					the entire SVE context
 *
 *	ZREGS	__uint128_t[SVE_NUM_ZREGS][vq]	all Z-registers
 *	ZREG	__uint128_t[vq]			individual Z-register Zn
 *
 *	PREGS	uint16_t[SVE_NUM_PREGS][vq]	all P-registers
 *	PREG	uint16_t[vq]			individual P-register Pn
 *
 *	FFR	uint16_t[vq]			first-fault status register
 *
 * Additional data might be appended in the future.
 *
 * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
 * is encoded in memory in an endianness-invariant format, with the byte at
 * offset i from the start of the in-memory representation containing bits
 * [(7 + 8 * i) : (8 * i)] of the register value.
 */

#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)

#define SVE_SIG_REGS_OFFSET					\
	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
#define SVE_SIG_ZREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)

#define SVE_SIG_PREGS_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)

#define SVE_SIG_FFR_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))

#define SVE_SIG_REGS_SIZE(vq) \
		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))

#define SVE_SIG_CONTEXT_SIZE(vq) \
		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
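/*
 * Illustrative sketch (not part of the kernel UAPI): checking whether SVE
 * register data is present and, if so, locating Z-register n. The pointer
 * sve is assumed to point at a struct sve_context found in the signal frame
 * (for example via a record walk as sketched earlier), and n is the index
 * of the Z-register of interest; both names are illustrative assumptions.
 *
 *	unsigned int vq = sve_vq_from_vl(sve->vl);
 *	__uint128_t *zn = NULL;
 *
 *	if (sve->head.size >= SVE_SIG_CONTEXT_SIZE(vq))
 *		zn = (__uint128_t *)((char *)sve + SVE_SIG_ZREG_OFFSET(vq, n));
 *
 * The offsets are relative to the start of struct sve_context, so the base
 * for the pointer arithmetic is sve itself. On success zn addresses vq
 * quadwords holding Zn in the endianness-invariant layout described above;
 * when the size check fails, no SVE register data is present and only the
 * FPSIMD view in fpsimd_context should be used.
 */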
/*
 * If the ZA register is enabled for the thread at signal delivery, then
 * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
 * and the register data may be accessed using the ZA_SIG_*() macros.
 *
 * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl)),
 * the ZA register was not enabled for the thread and no register data is
 * included: in this case, the ZA_SIG_*() macros should not be used except
 * for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the za_context block if it wants to
 * enable the ZA register when it was previously non-live or vice-versa.
 * This may require the caller to allocate fresh memory and/or move other
 * context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * za_context.vl must equal the thread's current SME vector length when
 * doing a sigreturn.
 */

#define ZA_SIG_REGS_OFFSET					\
	((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))

#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
				  (SVE_SIG_ZREG_SIZE(vq) * n))

#define ZA_SIG_CONTEXT_SIZE(vq) \
		(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))

#define ZT_SIG_REG_SIZE 512

#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)

#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)

#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)

#define ZT_SIG_CONTEXT_SIZE(n) \
	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))

#endif /* _UAPI__ASM_SIGCONTEXT_H */
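/*
 * Illustrative sketch (not part of the kernel UAPI): checking whether ZA
 * register data is present and locating horizontal slice (row) n of the ZA
 * matrix. The pointer za is assumed to point at a struct za_context found
 * in the signal frame; za and n are illustrative assumptions. Each of the
 * vl rows is SVE_SIG_ZREG_SIZE(vq) bytes long.
 *
 *	unsigned int vq = sve_vq_from_vl(za->vl);
 *	void *row_n = NULL;
 *
 *	if (za->head.size >= ZA_SIG_CONTEXT_SIZE(vq))
 *		row_n = (char *)za + ZA_SIG_ZAV_OFFSET(vq, n);
 *
 * When the size check fails, ZA was not enabled at signal delivery and no
 * register data follows the za_context header.
 */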