1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Check for KVM_GET_REG_LIST regressions.
4  *
5  * Copyright (C) 2020, Red Hat, Inc.
6  *
7  * When attempting to migrate from a host with an older kernel to a host
8  * with a newer kernel we allow the newer kernel on the destination to
9  * list new registers with get-reg-list. We assume they'll be unused, at
10  * least until the guest reboots, and so they're relatively harmless.
11  * However, if the destination host with the newer kernel is missing
12  * registers which the source host with the older kernel has, then that's
13  * a regression in get-reg-list. This test checks for that regression by
14  * checking the current list against a blessed list. We should never have
15  * missing registers, but if new ones appear then they can probably be
16  * added to the blessed list. A completely new blessed list can be created
17  * by running the test with the --list command line argument.
18  *
19  * Note, the blessed list should be created from the oldest possible
20  * kernel. We can't go older than v4.15, though, because that's the first
21  * release to expose the ID system registers in KVM_GET_REG_LIST, see
22  * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
23  * from guests"). Also, one must use the --core-reg-fixup command line
24  * option when running on an older kernel that doesn't include df205b5c6328
25  * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
26  */
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 #include <sys/types.h>
32 #include <sys/wait.h>
33 #include "kvm_util.h"
34 #include "test_util.h"
35 #include "processor.h"
36 
/* Register list returned by KVM_GET_REG_LIST for the vcpu under test. */
static struct kvm_reg_list *reg_list;
/* Flattened "blessed" register list built from the config's sublists. */
static __u64 *blessed_reg, blessed_n;

struct reg_sublist {
	const char *name;	/* sublist name, also used to build the config name */
	long capability;	/* KVM_CAP_* required for this sublist, 0 if none */
	int feature;		/* KVM_ARM_VCPU_* feature bit to request at vcpu init */
	bool finalize;		/* whether KVM_ARM_VCPU_FINALIZE must be called */
	__u64 *regs;		/* expected (blessed) register ids */
	__u64 regs_n;		/* number of entries in regs */
	__u64 *rejects_set;	/* registers whose set must fail after finalize */
	__u64 rejects_set_n;	/* number of entries in rejects_set */
};

struct vcpu_config {
	char *name;		/* lazily built by config_name() */
	struct reg_sublist sublists[];	/* terminated by a sublist with .regs == NULL */
};

static struct vcpu_config *vcpu_configs[];
static int vcpu_configs_n;

/* Iterate a config's sublists up to the NULL-regs terminator. */
#define for_each_sublist(c, s)							\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

/* Iterate every register id in reg_list. */
#define for_each_reg(i)								\
	for ((i) = 0; (i) < reg_list->n; ++(i))

/* Like for_each_reg(), but skipping registers we can't bless (see filter_reg()). */
#define for_each_reg_filtered(i)						\
	for_each_reg(i)								\
		if (!filter_reg(reg_list->reg[i]))

/* Blessed registers the current kernel no longer reports. */
#define for_each_missing_reg(i)							\
	for ((i) = 0; (i) < blessed_n; ++(i))					\
		if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))

/* Reported registers not yet present in the blessed list. */
#define for_each_new_reg(i)							\
	for_each_reg_filtered(i)						\
		if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
76 
config_name(struct vcpu_config * c)77 static const char *config_name(struct vcpu_config *c)
78 {
79 	struct reg_sublist *s;
80 	int len = 0;
81 
82 	if (c->name)
83 		return c->name;
84 
85 	for_each_sublist(c, s)
86 		len += strlen(s->name) + 1;
87 
88 	c->name = malloc(len);
89 
90 	len = 0;
91 	for_each_sublist(c, s) {
92 		if (!strcmp(s->name, "base"))
93 			continue;
94 		strcat(c->name + len, s->name);
95 		len += strlen(s->name) + 1;
96 		c->name[len - 1] = '+';
97 	}
98 	c->name[len - 1] = '\0';
99 
100 	return c->name;
101 }
102 
has_cap(struct vcpu_config * c,long capability)103 static bool has_cap(struct vcpu_config *c, long capability)
104 {
105 	struct reg_sublist *s;
106 
107 	for_each_sublist(c, s)
108 		if (s->capability == capability)
109 			return true;
110 	return false;
111 }
112 
/*
 * DEMUX register presence depends on the host's CLIDR_EL1.
 * This means there's no set of them that we can bless, so
 * they are filtered out of the comparison.
 */
static bool filter_reg(__u64 reg)
{
	return (reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX;
}
124 
/* Return true when @reg appears in the first @nr_regs entries of @regs. */
static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
	__u64 i;	/* same width as nr_regs; avoids signed/unsigned compare */

	for (i = 0; i < nr_regs; ++i)
		if (reg == regs[i])
			return true;
	return false;
}
134 
/*
 * Return a heap-allocated copy of @template with its "##" placeholder
 * replaced by the decimal value of @index. The result is intentionally
 * never freed (it's only used for one-shot test output).
 */
static const char *str_with_index(const char *template, __u64 index)
{
	const char *pos = strstr(template, "##");
	size_t len;
	char *str;

	if (!pos)
		return template; /* nothing to substitute */

	/*
	 * Worst case the 2-char "##" is replaced by the 20 decimal
	 * digits of a full __u64 (the old strdup()-sized buffer
	 * overflowed for indices >= 100).
	 */
	len = strlen(template) - 2 + 20 + 1;
	str = malloc(len);
	if (!str)
		return NULL;

	snprintf(str, len, "%.*s%llu%s", (int)(pos - template), template,
		 (unsigned long long)index, pos + 2);

	return str;
}
147 
/* Bits of a reg id that identify arch/size/coproc; the rest is the offset. */
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

/*
 * Core reg offsets are expressed in 32-bit words, so each kind of
 * register spans this many offset values.
 */
#define CORE_REGS_XX_NR_WORDS	2
#define CORE_SPSR_XX_NR_WORDS	2
#define CORE_FPREGS_XX_NR_WORDS	4
153 
/*
 * Map a core register id to the source expression that names it in the
 * blessed lists, asserting the offset is one we recognize. Indexed
 * names are built via str_with_index().
 */
static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	/* Anything else is a reg id we don't know how to name. */
	TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
	return NULL;
}
196 
/*
 * Map an SVE register id to the source expression that names it,
 * asserting no unexpected bits are set. Like core_id_to_str(), fail
 * the test for ids we don't recognize.
 */
static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

	/* The low 5 bits of the id hold the slice number. */
	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
		return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	/*
	 * Don't hand NULL back to print_reg()'s printf("%s"); fail
	 * loudly instead, matching core_id_to_str().
	 */
	TEST_FAIL("%s: Unknown SVE reg id: 0x%llx", config_name(c), id);
	return NULL;
}
230 
/*
 * Print @id in the exact form used by the blessed register arrays at
 * the bottom of this file, sanity checking the id's encoding on the
 * way (arch bits, size field, coproc-specific layout).
 */
static void print_reg(struct vcpu_config *c, __u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);

	/* Translate the size field back to its macro name. */
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	/* Each coproc type has its own printable form and validity checks. */
	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		/* Decode the op/CRn/CRm fields and verify they round-trip. */
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM_FW_FEAT_BMAP:
		TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
			    "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
		printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		/* SVE ids are only valid for configs that enabled SVE. */
		if (has_cap(c, KVM_CAP_ARM_SVE))
			printf("\t%s,\n", sve_id_to_str(c, id));
		else
			TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
		break;
	default:
		TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}
313 
314 /*
315  * Older kernels listed each 32-bit word of CORE registers separately.
316  * For 64 and 128-bit registers we need to ignore the extra words. We
317  * also need to fixup the sizes, because the older kernels stated all
318  * registers were 64-bit, even when they weren't.
319  */
core_reg_fixup(void)320 static void core_reg_fixup(void)
321 {
322 	struct kvm_reg_list *tmp;
323 	__u64 id, core_off;
324 	int i;
325 
326 	tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));
327 
328 	for (i = 0; i < reg_list->n; ++i) {
329 		id = reg_list->reg[i];
330 
331 		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
332 			tmp->reg[tmp->n++] = id;
333 			continue;
334 		}
335 
336 		core_off = id & ~REG_MASK;
337 
338 		switch (core_off) {
339 		case 0x52: case 0xd2: case 0xd6:
340 			/*
341 			 * These offsets are pointing at padding.
342 			 * We need to ignore them too.
343 			 */
344 			continue;
345 		case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
346 		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
347 			if (core_off & 3)
348 				continue;
349 			id &= ~KVM_REG_SIZE_MASK;
350 			id |= KVM_REG_SIZE_U128;
351 			tmp->reg[tmp->n++] = id;
352 			continue;
353 		case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
354 		case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
355 			id &= ~KVM_REG_SIZE_MASK;
356 			id |= KVM_REG_SIZE_U32;
357 			tmp->reg[tmp->n++] = id;
358 			continue;
359 		default:
360 			if (core_off & 1)
361 				continue;
362 			tmp->reg[tmp->n++] = id;
363 			break;
364 		}
365 	}
366 
367 	free(reg_list);
368 	reg_list = tmp;
369 }
370 
prepare_vcpu_init(struct vcpu_config * c,struct kvm_vcpu_init * init)371 static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
372 {
373 	struct reg_sublist *s;
374 
375 	for_each_sublist(c, s)
376 		if (s->capability)
377 			init->features[s->feature / 32] |= 1 << (s->feature % 32);
378 }
379 
finalize_vcpu(struct kvm_vm * vm,uint32_t vcpuid,struct vcpu_config * c)380 static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
381 {
382 	struct reg_sublist *s;
383 	int feature;
384 
385 	for_each_sublist(c, s) {
386 		if (s->finalize) {
387 			feature = s->feature;
388 			vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
389 		}
390 	}
391 }
392 
check_supported(struct vcpu_config * c)393 static void check_supported(struct vcpu_config *c)
394 {
395 	struct reg_sublist *s;
396 
397 	for_each_sublist(c, s) {
398 		if (s->capability && !kvm_check_cap(s->capability)) {
399 			fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
400 			exit(KSFT_SKIP);
401 		}
402 	}
403 }
404 
/* Command line flags; see help(). */
static bool print_list;
static bool print_filtered;
static bool fixup_core_regs;
408 
/*
 * Run the get-reg-list test for one vcpu config: create a VM/vcpu with
 * the config's features, fetch the register list, round-trip every
 * register through get/set, then compare against the blessed list.
 * Called from a forked child in main(), so exit status and leaked
 * allocations are per-config.
 */
static void run_test(struct vcpu_config *c)
{
	struct kvm_vcpu_init init = { .target = -1, };
	int new_regs = 0, missing_regs = 0, i, n;
	int failed_get = 0, failed_set = 0, failed_reject = 0;
	struct kvm_vm *vm;
	struct reg_sublist *s;

	/* May exit(KSFT_SKIP) when a required capability is unavailable. */
	check_supported(c);

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
	prepare_vcpu_init(c, &init);
	aarch64_vcpu_add_default(vm, 0, &init, NULL);
	finalize_vcpu(vm, 0, c);

	reg_list = vcpu_get_reg_list(vm, 0);

	if (fixup_core_regs)
		core_reg_fixup();

	/* --list / --list-filtered only print the registers, no testing. */
	if (print_list || print_filtered) {
		putchar('\n');
		for_each_reg(i) {
			__u64 id = reg_list->reg[i];
			if ((print_list && !filter_reg(id)) ||
			    (print_filtered && filter_reg(id)))
				print_reg(c, id);
		}
		putchar('\n');
		return;
	}

	/*
	 * We only test that we can get the register and then write back the
	 * same value. Some registers may allow other values to be written
	 * back, but others only allow some bits to be changed, and at least
	 * for ID registers set will fail if the value does not exactly match
	 * what was returned by get. If registers that allow other values to
	 * be written need to have the other values tested, then we should
	 * create a new set of tests for those in a new independent test
	 * executable.
	 */
	for_each_reg(i) {
		uint8_t addr[2048 / 8];	/* big enough for a KVM_REG_SIZE_U2048 register */
		struct kvm_one_reg reg = {
			.id = reg_list->reg[i],
			.addr = (__u64)&addr,
		};
		bool reject_reg = false;
		int ret;

		ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
		if (ret) {
			printf("%s: Failed to get ", config_name(c));
			print_reg(c, reg.id);
			putchar('\n');
			++failed_get;
		}

		/* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
		for_each_sublist(c, s) {
			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
				reject_reg = true;
				/* The set must fail with EPERM for these registers. */
				ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
				if (ret != -1 || errno != EPERM) {
					printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
					print_reg(c, reg.id);
					putchar('\n');
					++failed_reject;
				}
				break;
			}
		}

		if (!reject_reg) {
			ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
			if (ret) {
				printf("%s: Failed to set ", config_name(c));
				print_reg(c, reg.id);
				putchar('\n');
				++failed_set;
			}
		}
	}

	/* Flatten the config's sublists into the global blessed list. */
	for_each_sublist(c, s)
		blessed_n += s->regs_n;
	blessed_reg = calloc(blessed_n, sizeof(__u64));

	n = 0;
	for_each_sublist(c, s) {
		for (i = 0; i < s->regs_n; ++i)
			blessed_reg[n++] = s->regs[i];
	}

	/* New: reported but not blessed. Missing: blessed but not reported. */
	for_each_new_reg(i)
		++new_regs;

	for_each_missing_reg(i)
		++missing_regs;

	if (new_regs || missing_regs) {
		n = 0;
		for_each_reg_filtered(i)
			++n;

		printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
		printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
		       config_name(c), reg_list->n, reg_list->n - n);
	}

	if (new_regs) {
		printf("\n%s: There are %d new registers.\n"
		       "Consider adding them to the blessed reg "
		       "list with the following lines:\n\n", config_name(c), new_regs);
		for_each_new_reg(i)
			print_reg(c, reg_list->reg[i]);
		putchar('\n');
	}

	if (missing_regs) {
		printf("\n%s: There are %d missing registers.\n"
		       "The following lines are missing registers:\n\n", config_name(c), missing_regs);
		for_each_missing_reg(i)
			print_reg(c, blessed_reg[i]);
		putchar('\n');
	}

	/* New registers are tolerated; anything else fails the test. */
	TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
		    "%s: There are %d missing registers; "
		    "%d registers failed get; %d registers failed set; %d registers failed reject",
		    config_name(c), missing_regs, failed_get, failed_set, failed_reject);

	pr_info("%s: PASS\n", config_name(c));
	/* Reset globals for the next config. */
	blessed_n = 0;
	free(blessed_reg);
	free(reg_list);
	kvm_vm_free(vm);
}
548 
help(void)549 static void help(void)
550 {
551 	struct vcpu_config *c;
552 	int i;
553 
554 	printf(
555 	"\n"
556 	"usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
557 	" --config=<selection>        Used to select a specific vcpu configuration for the test/listing\n"
558 	"                             '<selection>' may be\n");
559 
560 	for (i = 0; i < vcpu_configs_n; ++i) {
561 		c = vcpu_configs[i];
562 		printf(
563 	"                               '%s'\n", config_name(c));
564 	}
565 
566 	printf(
567 	"\n"
568 	" --list                      Print the register list rather than test it (requires --config)\n"
569 	" --list-filtered             Print registers that would normally be filtered out (requires --config)\n"
570 	" --core-reg-fixup            Needed when running on old kernels with broken core reg listings\n"
571 	"\n"
572 	);
573 }
574 
parse_config(const char * config)575 static struct vcpu_config *parse_config(const char *config)
576 {
577 	struct vcpu_config *c;
578 	int i;
579 
580 	if (config[8] != '=')
581 		help(), exit(1);
582 
583 	for (i = 0; i < vcpu_configs_n; ++i) {
584 		c = vcpu_configs[i];
585 		if (strcmp(config_name(c), &config[9]) == 0)
586 			break;
587 	}
588 
589 	if (i == vcpu_configs_n)
590 		help(), exit(1);
591 
592 	return c;
593 }
594 
main(int ac,char ** av)595 int main(int ac, char **av)
596 {
597 	struct vcpu_config *c, *sel = NULL;
598 	int i, ret = 0;
599 	pid_t pid;
600 
601 	for (i = 1; i < ac; ++i) {
602 		if (strcmp(av[i], "--core-reg-fixup") == 0)
603 			fixup_core_regs = true;
604 		else if (strncmp(av[i], "--config", 8) == 0)
605 			sel = parse_config(av[i]);
606 		else if (strcmp(av[i], "--list") == 0)
607 			print_list = true;
608 		else if (strcmp(av[i], "--list-filtered") == 0)
609 			print_filtered = true;
610 		else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
611 			help(), exit(0);
612 		else
613 			help(), exit(1);
614 	}
615 
616 	if (print_list || print_filtered) {
617 		/*
618 		 * We only want to print the register list of a single config.
619 		 */
620 		if (!sel)
621 			help(), exit(1);
622 	}
623 
624 	for (i = 0; i < vcpu_configs_n; ++i) {
625 		c = vcpu_configs[i];
626 		if (sel && c != sel)
627 			continue;
628 
629 		pid = fork();
630 
631 		if (!pid) {
632 			run_test(c);
633 			exit(0);
634 		} else {
635 			int wstatus;
636 			pid_t wpid = wait(&wstatus);
637 			TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
638 			if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
639 				ret = KSFT_FAIL;
640 		}
641 	}
642 
643 	return ret;
644 }
645 
/*
 * The current blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 *
 * The blessed list is up to date with kernel version v5.13-rc3
 *
 * NOTE(review): the uncommented ARM64_SYS_REG(2, 0, 0, n, 4..7) entries
 * appear to be the DBGBVRn/DBGBCRn/DBGWVRn/DBGWCRn debug breakpoint and
 * watchpoint registers — confirm against the arm64 sysreg encodings.
 */
static __u64 base_regs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
	KVM_REG_ARM_FW_REG(0),		/* KVM_REG_ARM_PSCI_VERSION */
	KVM_REG_ARM_FW_REG(1),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
	KVM_REG_ARM_FW_REG(2),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
	KVM_REG_ARM_FW_REG(3),		/* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(0),	/* KVM_REG_ARM_STD_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(1),	/* KVM_REG_ARM_STD_HYP_BMAP */
	KVM_REG_ARM_FW_FEAT_BMAP_REG(2),	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	ARM64_SYS_REG(3, 3, 14, 3, 1),	/* CNTV_CTL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 3, 2),	/* CNTV_CVAL_EL0 */
	ARM64_SYS_REG(3, 3, 14, 0, 2),
	ARM64_SYS_REG(3, 0, 0, 0, 0),	/* MIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 0, 6),	/* REVIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 1),	/* CLIDR_EL1 */
	ARM64_SYS_REG(3, 1, 0, 0, 7),	/* AIDR_EL1 */
	ARM64_SYS_REG(3, 3, 0, 0, 1),	/* CTR_EL0 */
	ARM64_SYS_REG(2, 0, 0, 0, 4),
	ARM64_SYS_REG(2, 0, 0, 0, 5),
	ARM64_SYS_REG(2, 0, 0, 0, 6),
	ARM64_SYS_REG(2, 0, 0, 0, 7),
	ARM64_SYS_REG(2, 0, 0, 1, 4),
	ARM64_SYS_REG(2, 0, 0, 1, 5),
	ARM64_SYS_REG(2, 0, 0, 1, 6),
	ARM64_SYS_REG(2, 0, 0, 1, 7),
	ARM64_SYS_REG(2, 0, 0, 2, 0),	/* MDCCINT_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 2),	/* MDSCR_EL1 */
	ARM64_SYS_REG(2, 0, 0, 2, 4),
	ARM64_SYS_REG(2, 0, 0, 2, 5),
	ARM64_SYS_REG(2, 0, 0, 2, 6),
	ARM64_SYS_REG(2, 0, 0, 2, 7),
	ARM64_SYS_REG(2, 0, 0, 3, 4),
	ARM64_SYS_REG(2, 0, 0, 3, 5),
	ARM64_SYS_REG(2, 0, 0, 3, 6),
	ARM64_SYS_REG(2, 0, 0, 3, 7),
	ARM64_SYS_REG(2, 0, 0, 4, 4),
	ARM64_SYS_REG(2, 0, 0, 4, 5),
	ARM64_SYS_REG(2, 0, 0, 4, 6),
	ARM64_SYS_REG(2, 0, 0, 4, 7),
	ARM64_SYS_REG(2, 0, 0, 5, 4),
	ARM64_SYS_REG(2, 0, 0, 5, 5),
	ARM64_SYS_REG(2, 0, 0, 5, 6),
	ARM64_SYS_REG(2, 0, 0, 5, 7),
	ARM64_SYS_REG(2, 0, 0, 6, 4),
	ARM64_SYS_REG(2, 0, 0, 6, 5),
	ARM64_SYS_REG(2, 0, 0, 6, 6),
	ARM64_SYS_REG(2, 0, 0, 6, 7),
	ARM64_SYS_REG(2, 0, 0, 7, 4),
	ARM64_SYS_REG(2, 0, 0, 7, 5),
	ARM64_SYS_REG(2, 0, 0, 7, 6),
	ARM64_SYS_REG(2, 0, 0, 7, 7),
	ARM64_SYS_REG(2, 0, 0, 8, 4),
	ARM64_SYS_REG(2, 0, 0, 8, 5),
	ARM64_SYS_REG(2, 0, 0, 8, 6),
	ARM64_SYS_REG(2, 0, 0, 8, 7),
	ARM64_SYS_REG(2, 0, 0, 9, 4),
	ARM64_SYS_REG(2, 0, 0, 9, 5),
	ARM64_SYS_REG(2, 0, 0, 9, 6),
	ARM64_SYS_REG(2, 0, 0, 9, 7),
	ARM64_SYS_REG(2, 0, 0, 10, 4),
	ARM64_SYS_REG(2, 0, 0, 10, 5),
	ARM64_SYS_REG(2, 0, 0, 10, 6),
	ARM64_SYS_REG(2, 0, 0, 10, 7),
	ARM64_SYS_REG(2, 0, 0, 11, 4),
	ARM64_SYS_REG(2, 0, 0, 11, 5),
	ARM64_SYS_REG(2, 0, 0, 11, 6),
	ARM64_SYS_REG(2, 0, 0, 11, 7),
	ARM64_SYS_REG(2, 0, 0, 12, 4),
	ARM64_SYS_REG(2, 0, 0, 12, 5),
	ARM64_SYS_REG(2, 0, 0, 12, 6),
	ARM64_SYS_REG(2, 0, 0, 12, 7),
	ARM64_SYS_REG(2, 0, 0, 13, 4),
	ARM64_SYS_REG(2, 0, 0, 13, 5),
	ARM64_SYS_REG(2, 0, 0, 13, 6),
	ARM64_SYS_REG(2, 0, 0, 13, 7),
	ARM64_SYS_REG(2, 0, 0, 14, 4),
	ARM64_SYS_REG(2, 0, 0, 14, 5),
	ARM64_SYS_REG(2, 0, 0, 14, 6),
	ARM64_SYS_REG(2, 0, 0, 14, 7),
	ARM64_SYS_REG(2, 0, 0, 15, 4),
	ARM64_SYS_REG(2, 0, 0, 15, 5),
	ARM64_SYS_REG(2, 0, 0, 15, 6),
	ARM64_SYS_REG(2, 0, 0, 15, 7),
	ARM64_SYS_REG(2, 0, 1, 1, 4),	/* OSLSR_EL1 */
	ARM64_SYS_REG(2, 4, 0, 7, 0),	/* DBGVCR32_EL2 */
	ARM64_SYS_REG(3, 0, 0, 0, 5),	/* MPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 0),	/* ID_PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 1),	/* ID_PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 2),	/* ID_DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 3),	/* ID_AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 4),	/* ID_MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 5),	/* ID_MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 6),	/* ID_MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 1, 7),	/* ID_MMFR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 0),	/* ID_ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 1),	/* ID_ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 2),	/* ID_ISAR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 3),	/* ID_ISAR3_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 4),	/* ID_ISAR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 5),	/* ID_ISAR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 6),	/* ID_MMFR4_EL1 */
	ARM64_SYS_REG(3, 0, 0, 2, 7),	/* ID_ISAR6_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 0),	/* MVFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 1),	/* MVFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 2),	/* MVFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 3),
	ARM64_SYS_REG(3, 0, 0, 3, 4),	/* ID_PFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 5),	/* ID_DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 6),	/* ID_MMFR5_EL1 */
	ARM64_SYS_REG(3, 0, 0, 3, 7),
	ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 1),	/* ID_AA64PFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 2),
	ARM64_SYS_REG(3, 0, 0, 4, 3),
	ARM64_SYS_REG(3, 0, 0, 4, 4),	/* ID_AA64ZFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 4, 5),
	ARM64_SYS_REG(3, 0, 0, 4, 6),
	ARM64_SYS_REG(3, 0, 0, 4, 7),
	ARM64_SYS_REG(3, 0, 0, 5, 0),	/* ID_AA64DFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 1),	/* ID_AA64DFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 2),
	ARM64_SYS_REG(3, 0, 0, 5, 3),
	ARM64_SYS_REG(3, 0, 0, 5, 4),	/* ID_AA64AFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 5),	/* ID_AA64AFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 5, 6),
	ARM64_SYS_REG(3, 0, 0, 5, 7),
	ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 1),	/* ID_AA64ISAR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 6, 2),
	ARM64_SYS_REG(3, 0, 0, 6, 3),
	ARM64_SYS_REG(3, 0, 0, 6, 4),
	ARM64_SYS_REG(3, 0, 0, 6, 5),
	ARM64_SYS_REG(3, 0, 0, 6, 6),
	ARM64_SYS_REG(3, 0, 0, 6, 7),
	ARM64_SYS_REG(3, 0, 0, 7, 0),	/* ID_AA64MMFR0_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 1),	/* ID_AA64MMFR1_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 2),	/* ID_AA64MMFR2_EL1 */
	ARM64_SYS_REG(3, 0, 0, 7, 3),
	ARM64_SYS_REG(3, 0, 0, 7, 4),
	ARM64_SYS_REG(3, 0, 0, 7, 5),
	ARM64_SYS_REG(3, 0, 0, 7, 6),
	ARM64_SYS_REG(3, 0, 0, 7, 7),
	ARM64_SYS_REG(3, 0, 1, 0, 0),	/* SCTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 1),	/* ACTLR_EL1 */
	ARM64_SYS_REG(3, 0, 1, 0, 2),	/* CPACR_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 0),	/* TTBR0_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 1),	/* TTBR1_EL1 */
	ARM64_SYS_REG(3, 0, 2, 0, 2),	/* TCR_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 0),	/* AFSR0_EL1 */
	ARM64_SYS_REG(3, 0, 5, 1, 1),	/* AFSR1_EL1 */
	ARM64_SYS_REG(3, 0, 5, 2, 0),	/* ESR_EL1 */
	ARM64_SYS_REG(3, 0, 6, 0, 0),	/* FAR_EL1 */
	ARM64_SYS_REG(3, 0, 7, 4, 0),	/* PAR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 2, 0),	/* MAIR_EL1 */
	ARM64_SYS_REG(3, 0, 10, 3, 0),	/* AMAIR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 0, 0),	/* VBAR_EL1 */
	ARM64_SYS_REG(3, 0, 12, 1, 1),	/* DISR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 1),	/* CONTEXTIDR_EL1 */
	ARM64_SYS_REG(3, 0, 13, 0, 4),	/* TPIDR_EL1 */
	ARM64_SYS_REG(3, 0, 14, 1, 0),	/* CNTKCTL_EL1 */
	ARM64_SYS_REG(3, 2, 0, 0, 0),	/* CSSELR_EL1 */
	ARM64_SYS_REG(3, 3, 13, 0, 2),	/* TPIDR_EL0 */
	ARM64_SYS_REG(3, 3, 13, 0, 3),	/* TPIDRRO_EL0 */
	ARM64_SYS_REG(3, 4, 3, 0, 0),	/* DACR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 0, 1),	/* IFSR32_EL2 */
	ARM64_SYS_REG(3, 4, 5, 3, 0),	/* FPEXC32_EL2 */
};
861 
/*
 * PMU registers, only present when the vcpu is created with the
 * KVM_ARM_VCPU_PMU_V3 feature (see PMU_SUBLIST below). The uncommented
 * ARM64_SYS_REG(3, 3, 14, ...) entries are the per-event counter/type
 * registers; the group comments below give their architectural names
 * (CRm and op2 encode the event counter index, per the Arm ARM).
 */
static __u64 pmu_regs[] = {
	ARM64_SYS_REG(3, 0, 9, 14, 1),	/* PMINTENSET_EL1 */
	ARM64_SYS_REG(3, 0, 9, 14, 2),	/* PMINTENCLR_EL1 */
	ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 1),	/* PMCNTENSET_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 2),	/* PMCNTENCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 3),	/* PMOVSCLR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 4),	/* PMSWINC_EL0 */
	ARM64_SYS_REG(3, 3, 9, 12, 5),	/* PMSELR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 13, 0),	/* PMCCNTR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 0),	/* PMUSERENR_EL0 */
	ARM64_SYS_REG(3, 3, 9, 14, 3),	/* PMOVSSET_EL0 */
	/* PMEVCNTR0_EL0 .. PMEVCNTR30_EL0 */
	ARM64_SYS_REG(3, 3, 14, 8, 0),
	ARM64_SYS_REG(3, 3, 14, 8, 1),
	ARM64_SYS_REG(3, 3, 14, 8, 2),
	ARM64_SYS_REG(3, 3, 14, 8, 3),
	ARM64_SYS_REG(3, 3, 14, 8, 4),
	ARM64_SYS_REG(3, 3, 14, 8, 5),
	ARM64_SYS_REG(3, 3, 14, 8, 6),
	ARM64_SYS_REG(3, 3, 14, 8, 7),
	ARM64_SYS_REG(3, 3, 14, 9, 0),
	ARM64_SYS_REG(3, 3, 14, 9, 1),
	ARM64_SYS_REG(3, 3, 14, 9, 2),
	ARM64_SYS_REG(3, 3, 14, 9, 3),
	ARM64_SYS_REG(3, 3, 14, 9, 4),
	ARM64_SYS_REG(3, 3, 14, 9, 5),
	ARM64_SYS_REG(3, 3, 14, 9, 6),
	ARM64_SYS_REG(3, 3, 14, 9, 7),
	ARM64_SYS_REG(3, 3, 14, 10, 0),
	ARM64_SYS_REG(3, 3, 14, 10, 1),
	ARM64_SYS_REG(3, 3, 14, 10, 2),
	ARM64_SYS_REG(3, 3, 14, 10, 3),
	ARM64_SYS_REG(3, 3, 14, 10, 4),
	ARM64_SYS_REG(3, 3, 14, 10, 5),
	ARM64_SYS_REG(3, 3, 14, 10, 6),
	ARM64_SYS_REG(3, 3, 14, 10, 7),
	ARM64_SYS_REG(3, 3, 14, 11, 0),
	ARM64_SYS_REG(3, 3, 14, 11, 1),
	ARM64_SYS_REG(3, 3, 14, 11, 2),
	ARM64_SYS_REG(3, 3, 14, 11, 3),
	ARM64_SYS_REG(3, 3, 14, 11, 4),
	ARM64_SYS_REG(3, 3, 14, 11, 5),
	ARM64_SYS_REG(3, 3, 14, 11, 6),
	/* PMEVTYPER0_EL0 .. PMEVTYPER30_EL0 */
	ARM64_SYS_REG(3, 3, 14, 12, 0),
	ARM64_SYS_REG(3, 3, 14, 12, 1),
	ARM64_SYS_REG(3, 3, 14, 12, 2),
	ARM64_SYS_REG(3, 3, 14, 12, 3),
	ARM64_SYS_REG(3, 3, 14, 12, 4),
	ARM64_SYS_REG(3, 3, 14, 12, 5),
	ARM64_SYS_REG(3, 3, 14, 12, 6),
	ARM64_SYS_REG(3, 3, 14, 12, 7),
	ARM64_SYS_REG(3, 3, 14, 13, 0),
	ARM64_SYS_REG(3, 3, 14, 13, 1),
	ARM64_SYS_REG(3, 3, 14, 13, 2),
	ARM64_SYS_REG(3, 3, 14, 13, 3),
	ARM64_SYS_REG(3, 3, 14, 13, 4),
	ARM64_SYS_REG(3, 3, 14, 13, 5),
	ARM64_SYS_REG(3, 3, 14, 13, 6),
	ARM64_SYS_REG(3, 3, 14, 13, 7),
	ARM64_SYS_REG(3, 3, 14, 14, 0),
	ARM64_SYS_REG(3, 3, 14, 14, 1),
	ARM64_SYS_REG(3, 3, 14, 14, 2),
	ARM64_SYS_REG(3, 3, 14, 14, 3),
	ARM64_SYS_REG(3, 3, 14, 14, 4),
	ARM64_SYS_REG(3, 3, 14, 14, 5),
	ARM64_SYS_REG(3, 3, 14, 14, 6),
	ARM64_SYS_REG(3, 3, 14, 14, 7),
	ARM64_SYS_REG(3, 3, 14, 15, 0),
	ARM64_SYS_REG(3, 3, 14, 15, 1),
	ARM64_SYS_REG(3, 3, 14, 15, 2),
	ARM64_SYS_REG(3, 3, 14, 15, 3),
	ARM64_SYS_REG(3, 3, 14, 15, 4),
	ARM64_SYS_REG(3, 3, 14, 15, 5),
	ARM64_SYS_REG(3, 3, 14, 15, 6),
	ARM64_SYS_REG(3, 3, 14, 15, 7),	/* PMCCFILTR_EL0 */
};
938 
/*
 * The 32 SIMD&FP vector registers V0-V31 (fp_regs.vregs[0..31]), each
 * exposed as a 128-bit (KVM_REG_SIZE_U128) core register. Used by the
 * non-SVE configs; SVE configs expose the Z registers instead.
 */
static __u64 vregs[] = {
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
	KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};
973 
/*
 * SVE registers, only present when the vcpu is created with the
 * KVM_ARM_VCPU_SVE feature and finalized (see SVE_SUBLIST below):
 * the vector-lengths pseudo-register, slice 0 of each Z (vector) and
 * P (predicate) register, the FFR, and ZCR_EL1.
 */
static __u64 sve_regs[] = {
	KVM_REG_ARM64_SVE_VLS,
	KVM_REG_ARM64_SVE_ZREG(0, 0),
	KVM_REG_ARM64_SVE_ZREG(1, 0),
	KVM_REG_ARM64_SVE_ZREG(2, 0),
	KVM_REG_ARM64_SVE_ZREG(3, 0),
	KVM_REG_ARM64_SVE_ZREG(4, 0),
	KVM_REG_ARM64_SVE_ZREG(5, 0),
	KVM_REG_ARM64_SVE_ZREG(6, 0),
	KVM_REG_ARM64_SVE_ZREG(7, 0),
	KVM_REG_ARM64_SVE_ZREG(8, 0),
	KVM_REG_ARM64_SVE_ZREG(9, 0),
	KVM_REG_ARM64_SVE_ZREG(10, 0),
	KVM_REG_ARM64_SVE_ZREG(11, 0),
	KVM_REG_ARM64_SVE_ZREG(12, 0),
	KVM_REG_ARM64_SVE_ZREG(13, 0),
	KVM_REG_ARM64_SVE_ZREG(14, 0),
	KVM_REG_ARM64_SVE_ZREG(15, 0),
	KVM_REG_ARM64_SVE_ZREG(16, 0),
	KVM_REG_ARM64_SVE_ZREG(17, 0),
	KVM_REG_ARM64_SVE_ZREG(18, 0),
	KVM_REG_ARM64_SVE_ZREG(19, 0),
	KVM_REG_ARM64_SVE_ZREG(20, 0),
	KVM_REG_ARM64_SVE_ZREG(21, 0),
	KVM_REG_ARM64_SVE_ZREG(22, 0),
	KVM_REG_ARM64_SVE_ZREG(23, 0),
	KVM_REG_ARM64_SVE_ZREG(24, 0),
	KVM_REG_ARM64_SVE_ZREG(25, 0),
	KVM_REG_ARM64_SVE_ZREG(26, 0),
	KVM_REG_ARM64_SVE_ZREG(27, 0),
	KVM_REG_ARM64_SVE_ZREG(28, 0),
	KVM_REG_ARM64_SVE_ZREG(29, 0),
	KVM_REG_ARM64_SVE_ZREG(30, 0),
	KVM_REG_ARM64_SVE_ZREG(31, 0),
	KVM_REG_ARM64_SVE_PREG(0, 0),
	KVM_REG_ARM64_SVE_PREG(1, 0),
	KVM_REG_ARM64_SVE_PREG(2, 0),
	KVM_REG_ARM64_SVE_PREG(3, 0),
	KVM_REG_ARM64_SVE_PREG(4, 0),
	KVM_REG_ARM64_SVE_PREG(5, 0),
	KVM_REG_ARM64_SVE_PREG(6, 0),
	KVM_REG_ARM64_SVE_PREG(7, 0),
	KVM_REG_ARM64_SVE_PREG(8, 0),
	KVM_REG_ARM64_SVE_PREG(9, 0),
	KVM_REG_ARM64_SVE_PREG(10, 0),
	KVM_REG_ARM64_SVE_PREG(11, 0),
	KVM_REG_ARM64_SVE_PREG(12, 0),
	KVM_REG_ARM64_SVE_PREG(13, 0),
	KVM_REG_ARM64_SVE_PREG(14, 0),
	KVM_REG_ARM64_SVE_PREG(15, 0),
	KVM_REG_ARM64_SVE_FFR(0),
	ARM64_SYS_REG(3, 0, 1, 2, 0),   /* ZCR_EL1 */
};
1027 
/*
 * Registers from the SVE sublist for which KVM_SET_ONE_REG is expected
 * to be rejected (wired up via .rejects_set in SVE_SUBLIST).
 * NOTE(review): presumably VLS can't be set once the vcpu is finalized —
 * confirm against the set-reg test path.
 */
static __u64 sve_rejects_set[] = {
	KVM_REG_ARM64_SVE_VLS,
};
1031 
1032 static __u64 pauth_addr_regs[] = {
1033 	ARM64_SYS_REG(3, 0, 2, 1, 0),	/* APIAKEYLO_EL1 */
1034 	ARM64_SYS_REG(3, 0, 2, 1, 1),	/* APIAKEYHI_EL1 */
1035 	ARM64_SYS_REG(3, 0, 2, 1, 2),	/* APIBKEYLO_EL1 */
1036 	ARM64_SYS_REG(3, 0, 2, 1, 3),	/* APIBKEYHI_EL1 */
1037 	ARM64_SYS_REG(3, 0, 2, 2, 0),	/* APDAKEYLO_EL1 */
1038 	ARM64_SYS_REG(3, 0, 2, 2, 1),	/* APDAKEYHI_EL1 */
1039 	ARM64_SYS_REG(3, 0, 2, 2, 2),	/* APDBKEYLO_EL1 */
1040 	ARM64_SYS_REG(3, 0, 2, 2, 3)	/* APDBKEYHI_EL1 */
1041 };
1042 
/*
 * Pointer-authentication generic-key registers, only present when the
 * vcpu is created with the KVM_ARM_VCPU_PTRAUTH_GENERIC feature (see
 * PAUTH_SUBLIST below).
 */
static __u64 pauth_generic_regs[] = {
	ARM64_SYS_REG(3, 0, 2, 3, 0),	/* APGAKEYLO_EL1 */
	ARM64_SYS_REG(3, 0, 2, 3, 1),	/* APGAKEYHI_EL1 */
};
1047 
/*
 * struct reg_sublist initializers used to compose the vcpu configs below.
 * Sublists with a .capability/.feature are only exercised when the host
 * supports them; .finalize requests KVM_ARM_VCPU_FINALIZE after init.
 * PAUTH_SUBLIST expands to two sublists because the address and generic
 * keys are enabled by separate capabilities/features.
 */
#define BASE_SUBLIST \
	{ .name = "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
	{ .name = "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
	{ .name = "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
	  .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
	{ .name = "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
	  .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
	  .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST							\
	{								\
		.name		= "pauth_address",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_ADDRESS,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_ADDRESS,		\
		.regs		= pauth_addr_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_addr_regs),		\
	},								\
	{								\
		.name		= "pauth_generic",			\
		.capability	= KVM_CAP_ARM_PTRAUTH_GENERIC,		\
		.feature	= KVM_ARM_VCPU_PTRAUTH_GENERIC,		\
		.regs		= pauth_generic_regs,			\
		.regs_n		= ARRAY_SIZE(pauth_generic_regs),	\
	}
1074 
/*
 * The vcpu configurations to test. Each one composes a set of sublists
 * (base plus optional feature sublists) and is terminated by the empty
 * {0} sentinel entry.
 */
static struct vcpu_config vregs_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	{0},
	},
};
static struct vcpu_config vregs_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_config sve_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	{0},
	},
};
static struct vcpu_config sve_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	SVE_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};
static struct vcpu_config pauth_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	{0},
	},
};
static struct vcpu_config pauth_pmu_config = {
	.sublists = {
	BASE_SUBLIST,
	VREGS_SUBLIST,
	PAUTH_SUBLIST,
	PMU_SUBLIST,
	{0},
	},
};

/* All configs the test iterates over, and their count. */
static struct vcpu_config *vcpu_configs[] = {
	&vregs_config,
	&vregs_pmu_config,
	&sve_config,
	&sve_pmu_config,
	&pauth_config,
	&pauth_pmu_config,
};
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
1132