// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

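/*
 * Read the guest's view of a system register. If the vcpu's sysreg
 * state is currently loaded on the physical CPU, read the live
 * register; otherwise fall back to the in-memory copy. The write
 * accessor below mirrors this.
 */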
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

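/*
 * Work out which bits of the 64-bit register an access targets: the
 * low or high 32 bits for an AArch32-mapped register, or the whole
 * register otherwise.
 */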
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

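/*
 * ICC_SRE_EL1: writes are ignored, reads return the SRE value the
 * vGIC has configured for this vcpu.
 */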
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

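/*
 * OSLAR_EL1 is write-only. Emulate the OS Lock by mirroring the OSLK
 * bit of the written value into the guest's OSLSR_EL1.
 */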
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
	if (p->regval & SYS_OSLAR_OSLK)
		oslsr |= SYS_OSLSR_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 id = sys_reg_to_index(rd);
	u64 val;
	int err;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation of 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3, which must be able to address each CPU directly
	 * when sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

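/* Hide all PMU registers when the vcpu has no PMU configured. */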
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

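/*
 * Check whether an EL0 access to a PMU register is permitted: it is
 * allowed if any of the given PMUSERENR_EL0 bits is set or the vcpu
 * is running at a privileged level. Otherwise inject an UNDEF and
 * report the access as disabled.
 */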
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

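/*
 * Validate a counter index against the number of counters advertised
 * in the guest's PMCR_EL0.N; the cycle counter index is always valid.
 * Out-of-range indices UNDEF.
 */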
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(r)							\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

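/* Catch-all accessor: inject an UNDEF for registers we choose not to implement. */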
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

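/*
 * Trap handler for the EL1 physical timer registers (and their
 * AArch32 counterparts), forwarding the access to the generic timer
 * emulation code.
 */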
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		BUG();
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r, bool raz)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (raz)
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (kvm_vgic_global_state.type == VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_WFXT);
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_AA64DFR0_PMUVER_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
		break;
	}

	return val;
}

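/*
 * ID register visibility: ID_AA64ZFR0_EL1 reads as zero when the
 * vcpu does not have SVE; all other ID registers remain visible.
 */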
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool raz = sysreg_visible_as_raz(vcpu, r);

	return __access_id_reg(vcpu, p, r, raz);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	u8 csv2, csv3;
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd, false);
	val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
		 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __get_id_reg(vcpu, rd, uaddr, raz);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __set_id_reg(vcpu, rd, uaddr, raz);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = 0;

	return reg_to_user(uaddr, &val, id);
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	int err;
	u64 val;

	/* Perform the access even if we are going to ignore the value */
	err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
	if (err)
		return err;

	return 0;
}

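/* CTR_EL0 is read-only; reads return the system-wide sanitised value. */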
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
	if (!(csselr & 1)) // data or unified cache
		p->regval &= ~GENMASK(27, 3);
	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_reg,		\
	.set_user = set_raz_id_reg,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
1759 AMU_AMEVTYPER0_EL0(2),
1760 AMU_AMEVTYPER0_EL0(3),
1761 AMU_AMEVTYPER0_EL0(4),
1762 AMU_AMEVTYPER0_EL0(5),
1763 AMU_AMEVTYPER0_EL0(6),
1764 AMU_AMEVTYPER0_EL0(7),
1765 AMU_AMEVTYPER0_EL0(8),
1766 AMU_AMEVTYPER0_EL0(9),
1767 AMU_AMEVTYPER0_EL0(10),
1768 AMU_AMEVTYPER0_EL0(11),
1769 AMU_AMEVTYPER0_EL0(12),
1770 AMU_AMEVTYPER0_EL0(13),
1771 AMU_AMEVTYPER0_EL0(14),
1772 AMU_AMEVTYPER0_EL0(15),
1773 AMU_AMEVCNTR1_EL0(0),
1774 AMU_AMEVCNTR1_EL0(1),
1775 AMU_AMEVCNTR1_EL0(2),
1776 AMU_AMEVCNTR1_EL0(3),
1777 AMU_AMEVCNTR1_EL0(4),
1778 AMU_AMEVCNTR1_EL0(5),
1779 AMU_AMEVCNTR1_EL0(6),
1780 AMU_AMEVCNTR1_EL0(7),
1781 AMU_AMEVCNTR1_EL0(8),
1782 AMU_AMEVCNTR1_EL0(9),
1783 AMU_AMEVCNTR1_EL0(10),
1784 AMU_AMEVCNTR1_EL0(11),
1785 AMU_AMEVCNTR1_EL0(12),
1786 AMU_AMEVCNTR1_EL0(13),
1787 AMU_AMEVCNTR1_EL0(14),
1788 AMU_AMEVCNTR1_EL0(15),
1789 AMU_AMEVTYPER1_EL0(0),
1790 AMU_AMEVTYPER1_EL0(1),
1791 AMU_AMEVTYPER1_EL0(2),
1792 AMU_AMEVTYPER1_EL0(3),
1793 AMU_AMEVTYPER1_EL0(4),
1794 AMU_AMEVTYPER1_EL0(5),
1795 AMU_AMEVTYPER1_EL0(6),
1796 AMU_AMEVTYPER1_EL0(7),
1797 AMU_AMEVTYPER1_EL0(8),
1798 AMU_AMEVTYPER1_EL0(9),
1799 AMU_AMEVTYPER1_EL0(10),
1800 AMU_AMEVTYPER1_EL0(11),
1801 AMU_AMEVTYPER1_EL0(12),
1802 AMU_AMEVTYPER1_EL0(13),
1803 AMU_AMEVTYPER1_EL0(14),
1804 AMU_AMEVTYPER1_EL0(15),
1805
1806 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
1807 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
1808 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
1809
1810 /* PMEVCNTRn_EL0 */
1811 PMU_PMEVCNTR_EL0(0),
1812 PMU_PMEVCNTR_EL0(1),
1813 PMU_PMEVCNTR_EL0(2),
1814 PMU_PMEVCNTR_EL0(3),
1815 PMU_PMEVCNTR_EL0(4),
1816 PMU_PMEVCNTR_EL0(5),
1817 PMU_PMEVCNTR_EL0(6),
1818 PMU_PMEVCNTR_EL0(7),
1819 PMU_PMEVCNTR_EL0(8),
1820 PMU_PMEVCNTR_EL0(9),
1821 PMU_PMEVCNTR_EL0(10),
1822 PMU_PMEVCNTR_EL0(11),
1823 PMU_PMEVCNTR_EL0(12),
1824 PMU_PMEVCNTR_EL0(13),
1825 PMU_PMEVCNTR_EL0(14),
1826 PMU_PMEVCNTR_EL0(15),
1827 PMU_PMEVCNTR_EL0(16),
1828 PMU_PMEVCNTR_EL0(17),
1829 PMU_PMEVCNTR_EL0(18),
1830 PMU_PMEVCNTR_EL0(19),
1831 PMU_PMEVCNTR_EL0(20),
1832 PMU_PMEVCNTR_EL0(21),
1833 PMU_PMEVCNTR_EL0(22),
1834 PMU_PMEVCNTR_EL0(23),
1835 PMU_PMEVCNTR_EL0(24),
1836 PMU_PMEVCNTR_EL0(25),
1837 PMU_PMEVCNTR_EL0(26),
1838 PMU_PMEVCNTR_EL0(27),
1839 PMU_PMEVCNTR_EL0(28),
1840 PMU_PMEVCNTR_EL0(29),
1841 PMU_PMEVCNTR_EL0(30),
1842 /* PMEVTYPERn_EL0 */
1843 PMU_PMEVTYPER_EL0(0),
1844 PMU_PMEVTYPER_EL0(1),
1845 PMU_PMEVTYPER_EL0(2),
1846 PMU_PMEVTYPER_EL0(3),
1847 PMU_PMEVTYPER_EL0(4),
1848 PMU_PMEVTYPER_EL0(5),
1849 PMU_PMEVTYPER_EL0(6),
1850 PMU_PMEVTYPER_EL0(7),
1851 PMU_PMEVTYPER_EL0(8),
1852 PMU_PMEVTYPER_EL0(9),
1853 PMU_PMEVTYPER_EL0(10),
1854 PMU_PMEVTYPER_EL0(11),
1855 PMU_PMEVTYPER_EL0(12),
1856 PMU_PMEVTYPER_EL0(13),
1857 PMU_PMEVTYPER_EL0(14),
1858 PMU_PMEVTYPER_EL0(15),
1859 PMU_PMEVTYPER_EL0(16),
1860 PMU_PMEVTYPER_EL0(17),
1861 PMU_PMEVTYPER_EL0(18),
1862 PMU_PMEVTYPER_EL0(19),
1863 PMU_PMEVTYPER_EL0(20),
1864 PMU_PMEVTYPER_EL0(21),
1865 PMU_PMEVTYPER_EL0(22),
1866 PMU_PMEVTYPER_EL0(23),
1867 PMU_PMEVTYPER_EL0(24),
1868 PMU_PMEVTYPER_EL0(25),
1869 PMU_PMEVTYPER_EL0(26),
1870 PMU_PMEVTYPER_EL0(27),
1871 PMU_PMEVTYPER_EL0(28),
1872 PMU_PMEVTYPER_EL0(29),
1873 PMU_PMEVTYPER_EL0(30),
1874 /*
1875 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1876 * in 32bit mode. Here we choose to reset it as zero for consistency.
1877 */
1878 { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
1879 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
1880
1881 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1882 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1883 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1884 };
1885
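/*
 * DBGDIDR is synthesised below from the sanitised 64bit ID registers:
 * the watchpoint, breakpoint and context-aware breakpoint counts land
 * in bits [31:20], bits [19:16] advertise debug architecture version
 * 0b0110 (Armv8), and the remaining feature bits are set according to
 * whether EL3 is implemented.
 */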
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
			     (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
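
/*
 * A worked example of the split mapping (illustrative only): a guest
 * write to the AArch32 DBGBVR1 register hits the AA32(LO) entry above
 * and updates DBGBVR1_EL1[31:0], while a write to DBGBXVR1 hits the
 * AA32(HI) entry and updates DBGBVR1_EL1[63:32]; together the two
 * 32bit accesses reconstruct the full 64bit breakpoint value.
 */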

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
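
/*
 * For example (illustrative): n = 13 gives CRm = 0b1000 | (13 >> 3) =
 * 0b1001 and Op2 = 13 & 0x7 = 0b101, so PMEVCNTR13 decodes as Op1=0,
 * CRn=0b1110, CRm=0b1001, Op2=0b101; PMEVTYPER13 differs only in its
 * CRm base (0b1100 | 1 = 0b1101).
 */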
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false; /* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of trap descriptors to match against
 * @nr_global: The size of the trap descriptor table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}
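
	/*
	 * e.g. (illustrative): for an mcrr where the Rt register holds
	 * 0x89abcdef and Rt2 holds 0x01234567, regval becomes
	 * 0x0123456789abcdef, with Rt in bits [31:0] and Rt2 in bits
	 * [63:32].
	 */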

	/*
	 * If the table contains a handler, handle the access and, for a
	 * read, split the resulting value back across Rt and Rt2 before
	 * returning with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * from AArch32.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}
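
/*
 * Worked example (illustrative): a VMRS read of MVFR0 traps with
 * reg_id = 0b0111, which the helper above translates to Op0=3, Op1=0,
 * CRn=0, CRm=3, Op2=0 -- the encoding of MVFR0_EL1 -- so the access
 * can be serviced by the generic AArch64 emulation.
 */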

/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
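
/*
 * For example (illustrative): an AArch32 read of ID_PFR0 (coproc=15,
 * Op1=0, CRn=0, CRm=1, Op2=0) is rerouted by appending Op0=3, which
 * lands on the AArch64 ID_PFR0_EL1 encoding in the sys_reg table.
 */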

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 * @global: The table of trap descriptors to match against
 * @nr_global: The size of the trap descriptor table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}

static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
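
/*
 * The CRn mask above accepts exactly the values with bits {0,1,3}
 * set, i.e. CRn == 0b1011 (11) or CRn == 0b1111 (15), which covers
 * the implementation-defined system register encoding space.
 */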

/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return false;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							 u64 id)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}
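
/*
 * For instance, FUNCTION_INVARIANT(midr_el1) defines a get_midr_el1()
 * helper that snapshots the host's MIDR_EL1 into the descriptor's
 * ->val field (casting away the const on the table entry).
 */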

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
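
/*
 * Worked example (illustrative): CSSELR value 0b0011 selects level
 * (3 >> 1) = 1, i.e. the L2 cache, with InD = 1 (instruction); it is
 * only valid if Ctype2 reports an instruction-only cache (0b001) or
 * separate I and D caches (0b011).
 */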

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
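
/*
 * For example (illustrative, assuming the current UAPI shift values):
 * SCTLR_EL1 (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0) maps to the index
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | 0xc080.
 */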

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

int kvm_sys_reg_table_init(void)
{
	bool valid = true;
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 * If software reads the Cache Type fields from Ctype1
	 * upwards, once it has seen a value of 0b000, no caches
	 * exist at further-out levels of the hierarchy. So, for
	 * example, if Ctype3 is the first Cache Type field with a
	 * value of 0b000, the values of Ctype4 to Ctype7 must be
	 * ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
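
	/*
	 * e.g. (illustrative): with Ctype1 = 0b011 (separate I/D) and
	 * Ctype2 = 0b100 (unified), Ctype3 is the first zero field, so
	 * the loop stops at i = 2 and the mask keeps only bits [5:0]
	 * of cache_levels.
	 */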

	return 0;
}
