1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 *
6 * Derived from arch/arm/kvm/coproc.c:
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Authors: Rusty Russell <rusty@rustcorp.com.au>
9 * Christoffer Dall <c.dall@virtualopensystems.com>
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/bsearch.h>
14 #include <linux/kvm_host.h>
15 #include <linux/mm.h>
16 #include <linux/printk.h>
17 #include <linux/uaccess.h>
18
19 #include <asm/cacheflush.h>
20 #include <asm/cputype.h>
21 #include <asm/debug-monitors.h>
22 #include <asm/esr.h>
23 #include <asm/kvm_arm.h>
24 #include <asm/kvm_emulate.h>
25 #include <asm/kvm_hyp.h>
26 #include <asm/kvm_mmu.h>
27 #include <asm/perf_event.h>
28 #include <asm/sysreg.h>
29
30 #include <trace/events/kvm.h>
31
32 #include "sys_regs.h"
33
34 #include "trace.h"
35
36 /*
37 * For AArch32, we only take care of what is being trapped. Anything
38 * that has to do with init and userspace access has to go via the
39 * 64bit interface.
40 */
41
42 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
43
44 static bool read_from_write_only(struct kvm_vcpu *vcpu,
45 struct sys_reg_params *params,
46 const struct sys_reg_desc *r)
47 {
48 WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
49 print_sys_reg_instr(params);
50 kvm_inject_undefined(vcpu);
51 return false;
52 }
53
54 static bool write_to_read_only(struct kvm_vcpu *vcpu,
55 struct sys_reg_params *params,
56 const struct sys_reg_desc *r)
57 {
58 WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
59 print_sys_reg_instr(params);
60 kvm_inject_undefined(vcpu);
61 return false;
62 }
63
64 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
65 {
66 u64 val = 0x8badf00d8badf00d;
67
68 if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
69 __vcpu_read_sys_reg_from_cpu(reg, &val))
70 return val;
71
72 return __vcpu_sys_reg(vcpu, reg);
73 }
74
75 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
76 {
77 if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
78 __vcpu_write_sys_reg_to_cpu(val, reg))
79 return;
80
81 __vcpu_sys_reg(vcpu, reg) = val;
82 }
83
84 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
85 static u32 cache_levels;
86
87 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
88 #define CSSELR_MAX 14
89
90 /* Which cache CCSIDR represents depends on CSSELR value. */
91 static u32 get_ccsidr(u32 csselr)
92 {
93 u32 ccsidr;
94
95 /* Make sure no one else changes CSSELR during this! */
96 local_irq_disable();
97 write_sysreg(csselr, csselr_el1);
98 isb();
99 ccsidr = read_sysreg(ccsidr_el1);
100 local_irq_enable();
101
102 return ccsidr;
103 }
104
105 /*
106 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
107 */
108 static bool access_dcsw(struct kvm_vcpu *vcpu,
109 struct sys_reg_params *p,
110 const struct sys_reg_desc *r)
111 {
112 if (!p->is_write)
113 return read_from_write_only(vcpu, p, r);
114
115 /*
116 * Only track S/W ops if we don't have FWB. It still indicates
117 * that the guest is a bit broken (S/W operations should only
118 * be done by firmware, knowing that there is only a single
119 * CPU left in the system, and certainly not from non-secure
120 * software).
121 */
122 if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
123 kvm_set_way_flush(vcpu);
124
125 return true;
126 }
127
128 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
129 {
130 switch (r->aarch32_map) {
131 case AA32_LO:
132 *mask = GENMASK_ULL(31, 0);
133 *shift = 0;
134 break;
135 case AA32_HI:
136 *mask = GENMASK_ULL(63, 32);
137 *shift = 32;
138 break;
139 default:
140 *mask = GENMASK_ULL(63, 0);
141 *shift = 0;
142 break;
143 }
144 }
145
146 /*
147 * Generic accessor for VM registers. Only called as long as HCR_TVM
148 * is set. If the guest enables the MMU, we stop trapping the VM
149 * sys_regs and leave it in complete control of the caches.
150 */
151 static bool access_vm_reg(struct kvm_vcpu *vcpu,
152 struct sys_reg_params *p,
153 const struct sys_reg_desc *r)
154 {
155 bool was_enabled = vcpu_has_cache_enabled(vcpu);
156 u64 val, mask, shift;
157
158 BUG_ON(!p->is_write);
159
160 get_access_mask(r, &mask, &shift);
161
162 if (~mask) {
163 val = vcpu_read_sys_reg(vcpu, r->reg);
164 val &= ~mask;
165 } else {
166 val = 0;
167 }
168
169 val |= (p->regval & (mask >> shift)) << shift;
170 vcpu_write_sys_reg(vcpu, val, r->reg);
171
172 kvm_toggle_cache(vcpu, was_enabled);
173 return true;
174 }
175
176 static bool access_actlr(struct kvm_vcpu *vcpu,
177 struct sys_reg_params *p,
178 const struct sys_reg_desc *r)
179 {
180 u64 mask, shift;
181
182 if (p->is_write)
183 return ignore_write(vcpu, p);
184
185 get_access_mask(r, &mask, &shift);
186 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
187
188 return true;
189 }
190
191 /*
192 * Trap handler for the GICv3 SGI generation system register.
193 * Forward the request to the VGIC emulation.
194 * The cp15_64 code makes sure this automatically works
195 * for both AArch64 and AArch32 accesses.
196 */
197 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
198 struct sys_reg_params *p,
199 const struct sys_reg_desc *r)
200 {
201 bool g1;
202
203 if (!p->is_write)
204 return read_from_write_only(vcpu, p, r);
205
206 /*
207 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
208 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
209 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
210 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
211 * group.
212 */
213 if (p->Op0 == 0) { /* AArch32 */
214 switch (p->Op1) {
215 default: /* Keep GCC quiet */
216 case 0: /* ICC_SGI1R */
217 g1 = true;
218 break;
219 case 1: /* ICC_ASGI1R */
220 case 2: /* ICC_SGI0R */
221 g1 = false;
222 break;
223 }
224 } else { /* AArch64 */
225 switch (p->Op2) {
226 default: /* Keep GCC quiet */
227 case 5: /* ICC_SGI1R_EL1 */
228 g1 = true;
229 break;
230 case 6: /* ICC_ASGI1R_EL1 */
231 case 7: /* ICC_SGI0R_EL1 */
232 g1 = false;
233 break;
234 }
235 }
236
237 vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
238
239 return true;
240 }
241
242 static bool access_gic_sre(struct kvm_vcpu *vcpu,
243 struct sys_reg_params *p,
244 const struct sys_reg_desc *r)
245 {
246 if (p->is_write)
247 return ignore_write(vcpu, p);
248
249 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
250 return true;
251 }
252
253 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
254 struct sys_reg_params *p,
255 const struct sys_reg_desc *r)
256 {
257 if (p->is_write)
258 return ignore_write(vcpu, p);
259 else
260 return read_zero(vcpu, p);
261 }
262
263 /*
264 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
265 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
266 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
267 * treat it separately.
268 */
269 static bool trap_loregion(struct kvm_vcpu *vcpu,
270 struct sys_reg_params *p,
271 const struct sys_reg_desc *r)
272 {
273 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
274 u32 sr = reg_to_encoding(r);
275
276 if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
277 kvm_inject_undefined(vcpu);
278 return false;
279 }
280
281 if (p->is_write && sr == SYS_LORID_EL1)
282 return write_to_read_only(vcpu, p, r);
283
284 return trap_raz_wi(vcpu, p, r);
285 }
286
287 static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
288 struct sys_reg_params *p,
289 const struct sys_reg_desc *r)
290 {
291 u64 oslsr;
292
293 if (!p->is_write)
294 return read_from_write_only(vcpu, p, r);
295
296 /* Forward the OSLK bit to OSLSR */
297 oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
298 if (p->regval & SYS_OSLAR_OSLK)
299 oslsr |= SYS_OSLSR_OSLK;
300
301 __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
302 return true;
303 }
304
305 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
306 struct sys_reg_params *p,
307 const struct sys_reg_desc *r)
308 {
309 if (p->is_write)
310 return write_to_read_only(vcpu, p, r);
311
312 p->regval = __vcpu_sys_reg(vcpu, r->reg);
313 return true;
314 }
315
316 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
317 u64 val)
318 {
319 /*
320 * The only modifiable bit is the OSLK bit. Refuse the write if
321 * userspace attempts to change any other bit in the register.
322 */
323 if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
324 return -EINVAL;
325
326 __vcpu_sys_reg(vcpu, rd->reg) = val;
327 return 0;
328 }
329
330 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
331 struct sys_reg_params *p,
332 const struct sys_reg_desc *r)
333 {
334 if (p->is_write) {
335 return ignore_write(vcpu, p);
336 } else {
337 p->regval = read_sysreg(dbgauthstatus_el1);
338 return true;
339 }
340 }
341
342 /*
343 * We want to avoid world-switching all the DBG registers all the
344 * time:
345 *
346 * - If we've touched any debug register, it is likely that we're
347 * going to touch more of them. It then makes sense to disable the
348 * traps and start doing the save/restore dance
349 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
350 * then mandatory to save/restore the registers, as the guest
351 * depends on them.
352 *
353 * For this, we use a DIRTY bit, indicating the guest has modified the
354 * debug registers, used as follows:
355 *
356 * On guest entry:
357 * - If the dirty bit is set (because we're coming back from trapping),
358 * disable the traps, save host registers, restore guest registers.
359 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
360 * set the dirty bit, disable the traps, save host registers,
361 * restore guest registers.
362 * - Otherwise, enable the traps
363 *
364 * On guest exit:
365 * - If the dirty bit is set, save guest registers, restore host
366 * registers and clear the dirty bit. This ensures that the host can
367 * now use the debug registers.
368 */
369 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
370 struct sys_reg_params *p,
371 const struct sys_reg_desc *r)
372 {
373 if (p->is_write) {
374 vcpu_write_sys_reg(vcpu, p->regval, r->reg);
375 vcpu_set_flag(vcpu, DEBUG_DIRTY);
376 } else {
377 p->regval = vcpu_read_sys_reg(vcpu, r->reg);
378 }
379
380 trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
381
382 return true;
383 }
384
385 /*
386 * reg_to_dbg/dbg_to_reg
387 *
388 * A 32 bit write to a debug register leaves the top bits alone
389 * A 32 bit read from a debug register only returns the bottom bits
390 *
391 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
392 * switches between host and guest values in future.
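* For AArch32 mappings, get_access_mask() provides the mask and shift that
* confine the access to the relevant half of the 64 bit register.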
393 */
394 static void reg_to_dbg(struct kvm_vcpu *vcpu,
395 struct sys_reg_params *p,
396 const struct sys_reg_desc *rd,
397 u64 *dbg_reg)
398 {
399 u64 mask, shift, val;
400
401 get_access_mask(rd, &mask, &shift);
402
403 val = *dbg_reg;
404 val &= ~mask;
405 val |= (p->regval & (mask >> shift)) << shift;
406 *dbg_reg = val;
407
408 vcpu_set_flag(vcpu, DEBUG_DIRTY);
409 }
410
411 static void dbg_to_reg(struct kvm_vcpu *vcpu,
412 struct sys_reg_params *p,
413 const struct sys_reg_desc *rd,
414 u64 *dbg_reg)
415 {
416 u64 mask, shift;
417
418 get_access_mask(rd, &mask, &shift);
419 p->regval = (*dbg_reg & mask) >> shift;
420 }
421
422 static bool trap_bvr(struct kvm_vcpu *vcpu,
423 struct sys_reg_params *p,
424 const struct sys_reg_desc *rd)
425 {
426 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
427
428 if (p->is_write)
429 reg_to_dbg(vcpu, p, rd, dbg_reg);
430 else
431 dbg_to_reg(vcpu, p, rd, dbg_reg);
432
433 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
434
435 return true;
436 }
437
438 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
439 u64 val)
440 {
441 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
442 return 0;
443 }
444
445 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
446 u64 *val)
447 {
448 *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
449 return 0;
450 }
451
452 static void reset_bvr(struct kvm_vcpu *vcpu,
453 const struct sys_reg_desc *rd)
454 {
455 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
456 }
457
458 static bool trap_bcr(struct kvm_vcpu *vcpu,
459 struct sys_reg_params *p,
460 const struct sys_reg_desc *rd)
461 {
462 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
463
464 if (p->is_write)
465 reg_to_dbg(vcpu, p, rd, dbg_reg);
466 else
467 dbg_to_reg(vcpu, p, rd, dbg_reg);
468
469 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
470
471 return true;
472 }
473
474 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
475 u64 val)
476 {
477 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
478 return 0;
479 }
480
481 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
482 u64 *val)
483 {
484 *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
485 return 0;
486 }
487
488 static void reset_bcr(struct kvm_vcpu *vcpu,
489 const struct sys_reg_desc *rd)
490 {
491 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
492 }
493
494 static bool trap_wvr(struct kvm_vcpu *vcpu,
495 struct sys_reg_params *p,
496 const struct sys_reg_desc *rd)
497 {
498 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
499
500 if (p->is_write)
501 reg_to_dbg(vcpu, p, rd, dbg_reg);
502 else
503 dbg_to_reg(vcpu, p, rd, dbg_reg);
504
505 trace_trap_reg(__func__, rd->CRm, p->is_write,
506 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
507
508 return true;
509 }
510
511 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
512 u64 val)
513 {
514 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
515 return 0;
516 }
517
518 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
519 u64 *val)
520 {
521 *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
522 return 0;
523 }
524
525 static void reset_wvr(struct kvm_vcpu *vcpu,
526 const struct sys_reg_desc *rd)
527 {
528 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
529 }
530
531 static bool trap_wcr(struct kvm_vcpu *vcpu,
532 struct sys_reg_params *p,
533 const struct sys_reg_desc *rd)
534 {
535 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
536
537 if (p->is_write)
538 reg_to_dbg(vcpu, p, rd, dbg_reg);
539 else
540 dbg_to_reg(vcpu, p, rd, dbg_reg);
541
542 trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
543
544 return true;
545 }
546
547 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
548 u64 val)
549 {
550 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
551 return 0;
552 }
553
554 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
555 u64 *val)
556 {
557 *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
558 return 0;
559 }
560
561 static void reset_wcr(struct kvm_vcpu *vcpu,
562 const struct sys_reg_desc *rd)
563 {
564 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
565 }
566
567 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
568 {
569 u64 amair = read_sysreg(amair_el1);
570 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
571 }
572
573 static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
574 {
575 u64 actlr = read_sysreg(actlr_el1);
576 vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
577 }
578
579 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
580 {
581 u64 mpidr;
582
583 /*
584 * Map the vcpu_id into the first three affinity level fields of
585 * the MPIDR. We limit the number of VCPUs in level 0 due to a
586 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
587 * of the GICv3 to be able to address each CPU directly when
588 * sending IPIs.
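* For example, vcpu_id 0x1234 maps to Aff0=0x4, Aff1=0x23, Aff2=0x01.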
589 */
590 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
591 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
592 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
593 vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
594 }
595
596 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
597 const struct sys_reg_desc *r)
598 {
599 if (kvm_vcpu_has_pmu(vcpu))
600 return 0;
601
602 return REG_HIDDEN;
603 }
604
605 static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
606 {
607 u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
608
609 /* No PMU available, any PMU reg may UNDEF... */
610 if (!kvm_arm_support_pmu_v3())
611 return;
612
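/* The valid mask covers the cycle counter plus event counters 0..N-1, with N read from the hardware PMCR_EL0. */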
613 n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
614 n &= ARMV8_PMU_PMCR_N_MASK;
615 if (n)
616 mask |= GENMASK(n - 1, 0);
617
618 reset_unknown(vcpu, r);
619 __vcpu_sys_reg(vcpu, r->reg) &= mask;
620 }
621
622 static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
623 {
624 reset_unknown(vcpu, r);
625 __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
626 }
627
628 static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
629 {
630 reset_unknown(vcpu, r);
631 __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
632 }
633
634 static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
635 {
636 reset_unknown(vcpu, r);
637 __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
638 }
639
640 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
641 {
642 u64 pmcr, val;
643
644 /* No PMU available, PMCR_EL0 may UNDEF... */
645 if (!kvm_arm_support_pmu_v3())
646 return;
647
648 pmcr = read_sysreg(pmcr_el0);
649 /*
650 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
651 * except PMCR.E resetting to zero.
652 */
653 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
654 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
655 if (!kvm_supports_32bit_el0())
656 val |= ARMV8_PMU_PMCR_LC;
657 __vcpu_sys_reg(vcpu, r->reg) = val;
658 }
659
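/*
* Return true (after injecting an UNDEF) when the EL0 PMU access is not
* allowed by PMUSERENR_EL0 and the vcpu is not running in a privileged mode.
*/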
660 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
661 {
662 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
663 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
664
665 if (!enabled)
666 kvm_inject_undefined(vcpu);
667
668 return !enabled;
669 }
670
671 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
672 {
673 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
674 }
675
676 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
677 {
678 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
679 }
680
681 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
682 {
683 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
684 }
685
686 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
687 {
688 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
689 }
690
691 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
692 const struct sys_reg_desc *r)
693 {
694 u64 val;
695
696 if (pmu_access_el0_disabled(vcpu))
697 return false;
698
699 if (p->is_write) {
700 /* Only update writeable bits of PMCR */
701 val = __vcpu_sys_reg(vcpu, PMCR_EL0);
702 val &= ~ARMV8_PMU_PMCR_MASK;
703 val |= p->regval & ARMV8_PMU_PMCR_MASK;
704 if (!kvm_supports_32bit_el0())
705 val |= ARMV8_PMU_PMCR_LC;
706 __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
707 kvm_pmu_handle_pmcr(vcpu, val);
708 kvm_vcpu_pmu_restore_guest(vcpu);
709 } else {
710 /* PMCR.P & PMCR.C are RAZ */
711 val = __vcpu_sys_reg(vcpu, PMCR_EL0)
712 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
713 p->regval = val;
714 }
715
716 return true;
717 }
718
719 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
720 const struct sys_reg_desc *r)
721 {
722 if (pmu_access_event_counter_el0_disabled(vcpu))
723 return false;
724
725 if (p->is_write)
726 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
727 else
728 /* return PMSELR.SEL field */
729 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
730 & ARMV8_PMU_COUNTER_MASK;
731
732 return true;
733 }
734
735 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
736 const struct sys_reg_desc *r)
737 {
738 u64 pmceid, mask, shift;
739
740 BUG_ON(p->is_write);
741
742 if (pmu_access_el0_disabled(vcpu))
743 return false;
744
745 get_access_mask(r, &mask, &shift);
746
747 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
748 pmceid &= mask;
749 pmceid >>= shift;
750
751 p->regval = pmceid;
752
753 return true;
754 }
755
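/*
* A counter index is valid if it is below PMCR_EL0.N or is the cycle
* counter (ARMV8_PMU_CYCLE_IDX); anything else triggers an UNDEF.
*/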
756 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
757 {
758 u64 pmcr, val;
759
760 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
761 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
762 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
763 kvm_inject_undefined(vcpu);
764 return false;
765 }
766
767 return true;
768 }
769
770 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
771 struct sys_reg_params *p,
772 const struct sys_reg_desc *r)
773 {
774 u64 idx = ~0UL;
775
776 if (r->CRn == 9 && r->CRm == 13) {
777 if (r->Op2 == 2) {
778 /* PMXEVCNTR_EL0 */
779 if (pmu_access_event_counter_el0_disabled(vcpu))
780 return false;
781
782 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
783 & ARMV8_PMU_COUNTER_MASK;
784 } else if (r->Op2 == 0) {
785 /* PMCCNTR_EL0 */
786 if (pmu_access_cycle_counter_el0_disabled(vcpu))
787 return false;
788
789 idx = ARMV8_PMU_CYCLE_IDX;
790 }
791 } else if (r->CRn == 0 && r->CRm == 9) {
792 /* PMCCNTR */
793 if (pmu_access_event_counter_el0_disabled(vcpu))
794 return false;
795
796 idx = ARMV8_PMU_CYCLE_IDX;
797 } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
798 /* PMEVCNTRn_EL0 */
799 if (pmu_access_event_counter_el0_disabled(vcpu))
800 return false;
801
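/* The counter index n is encoded as CRm[1:0]:Op2[2:0] */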
802 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
803 }
804
805 /* Catch any decoding mistake */
806 WARN_ON(idx == ~0UL);
807
808 if (!pmu_counter_idx_valid(vcpu, idx))
809 return false;
810
811 if (p->is_write) {
812 if (pmu_access_el0_disabled(vcpu))
813 return false;
814
815 kvm_pmu_set_counter_value(vcpu, idx, p->regval);
816 } else {
817 p->regval = kvm_pmu_get_counter_value(vcpu, idx);
818 }
819
820 return true;
821 }
822
823 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
824 const struct sys_reg_desc *r)
825 {
826 u64 idx, reg;
827
828 if (pmu_access_el0_disabled(vcpu))
829 return false;
830
831 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
832 /* PMXEVTYPER_EL0 */
833 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
834 reg = PMEVTYPER0_EL0 + idx;
835 } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
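/* PMEVTYPERn_EL0 or PMCCFILTR_EL0: n is encoded as CRm[1:0]:Op2[2:0] */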
836 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
837 if (idx == ARMV8_PMU_CYCLE_IDX)
838 reg = PMCCFILTR_EL0;
839 else
840 /* PMEVTYPERn_EL0 */
841 reg = PMEVTYPER0_EL0 + idx;
842 } else {
843 BUG();
844 }
845
846 if (!pmu_counter_idx_valid(vcpu, idx))
847 return false;
848
849 if (p->is_write) {
850 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
851 __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
852 kvm_vcpu_pmu_restore_guest(vcpu);
853 } else {
854 p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
855 }
856
857 return true;
858 }
859
860 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
861 const struct sys_reg_desc *r)
862 {
863 u64 val, mask;
864
865 if (pmu_access_el0_disabled(vcpu))
866 return false;
867
868 mask = kvm_pmu_valid_counter_mask(vcpu);
869 if (p->is_write) {
870 val = p->regval & mask;
871 if (r->Op2 & 0x1) {
872 /* accessing PMCNTENSET_EL0 */
873 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
874 kvm_pmu_enable_counter_mask(vcpu, val);
875 kvm_vcpu_pmu_restore_guest(vcpu);
876 } else {
877 /* accessing PMCNTENCLR_EL0 */
878 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
879 kvm_pmu_disable_counter_mask(vcpu, val);
880 }
881 } else {
882 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
883 }
884
885 return true;
886 }
887
888 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
889 const struct sys_reg_desc *r)
890 {
891 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
892
893 if (check_pmu_access_disabled(vcpu, 0))
894 return false;
895
896 if (p->is_write) {
897 u64 val = p->regval & mask;
898
899 if (r->Op2 & 0x1)
900 /* accessing PMINTENSET_EL1 */
901 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
902 else
903 /* accessing PMINTENCLR_EL1 */
904 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
905 } else {
906 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
907 }
908
909 return true;
910 }
911
912 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
913 const struct sys_reg_desc *r)
914 {
915 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
916
917 if (pmu_access_el0_disabled(vcpu))
918 return false;
919
920 if (p->is_write) {
921 if (r->CRm & 0x2)
922 /* accessing PMOVSSET_EL0 */
923 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
924 else
925 /* accessing PMOVSCLR_EL0 */
926 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
927 } else {
928 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
929 }
930
931 return true;
932 }
933
934 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
935 const struct sys_reg_desc *r)
936 {
937 u64 mask;
938
939 if (!p->is_write)
940 return read_from_write_only(vcpu, p, r);
941
942 if (pmu_write_swinc_el0_disabled(vcpu))
943 return false;
944
945 mask = kvm_pmu_valid_counter_mask(vcpu);
946 kvm_pmu_software_increment(vcpu, p->regval & mask);
947 return true;
948 }
949
950 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
951 const struct sys_reg_desc *r)
952 {
953 if (p->is_write) {
954 if (!vcpu_mode_priv(vcpu)) {
955 kvm_inject_undefined(vcpu);
956 return false;
957 }
958
959 __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
960 p->regval & ARMV8_PMU_USERENR_MASK;
961 } else {
962 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
963 & ARMV8_PMU_USERENR_MASK;
964 }
965
966 return true;
967 }
968
969 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
970 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
971 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
972 trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
973 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
974 trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
975 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
976 trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
977 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
978 trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
979
980 #define PMU_SYS_REG(r) \
981 SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
982
983 /* Macro to expand the PMEVCNTRn_EL0 register */
984 #define PMU_PMEVCNTR_EL0(n) \
985 { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
986 .reset = reset_pmevcntr, \
987 .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
988
989 /* Macro to expand the PMEVTYPERn_EL0 register */
990 #define PMU_PMEVTYPER_EL0(n) \
991 { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
992 .reset = reset_pmevtyper, \
993 .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
994
995 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
996 const struct sys_reg_desc *r)
997 {
998 kvm_inject_undefined(vcpu);
999
1000 return false;
1001 }
1002
1003 /* Macro to expand the AMU counter and type registers */
1004 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1005 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1006 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1007 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1008
1009 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1010 const struct sys_reg_desc *rd)
1011 {
1012 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1013 }
1014
1015 /*
1016 * If we land here on a PtrAuth access, that is because we didn't
1017 * fixup the access on exit by allowing the PtrAuth sysregs. The only
1018 * way this happens is when the guest does not have PtrAuth support
1019 * enabled.
1020 */
1021 #define __PTRAUTH_KEY(k) \
1022 { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
1023 .visibility = ptrauth_visibility}
1024
1025 #define PTRAUTH_KEY(k) \
1026 __PTRAUTH_KEY(k ## KEYLO_EL1), \
1027 __PTRAUTH_KEY(k ## KEYHI_EL1)
1028
1029 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1030 struct sys_reg_params *p,
1031 const struct sys_reg_desc *r)
1032 {
1033 enum kvm_arch_timers tmr;
1034 enum kvm_arch_timer_regs treg;
1035 u64 reg = reg_to_encoding(r);
1036
1037 switch (reg) {
1038 case SYS_CNTP_TVAL_EL0:
1039 case SYS_AARCH32_CNTP_TVAL:
1040 tmr = TIMER_PTIMER;
1041 treg = TIMER_REG_TVAL;
1042 break;
1043 case SYS_CNTP_CTL_EL0:
1044 case SYS_AARCH32_CNTP_CTL:
1045 tmr = TIMER_PTIMER;
1046 treg = TIMER_REG_CTL;
1047 break;
1048 case SYS_CNTP_CVAL_EL0:
1049 case SYS_AARCH32_CNTP_CVAL:
1050 tmr = TIMER_PTIMER;
1051 treg = TIMER_REG_CVAL;
1052 break;
1053 default:
1054 BUG();
1055 }
1056
1057 if (p->is_write)
1058 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1059 else
1060 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1061
1062 return true;
1063 }
1064
1065 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1066 static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
1067 {
1068 u32 id = reg_to_encoding(r);
1069 u64 val;
1070
1071 if (sysreg_visible_as_raz(vcpu, r))
1072 return 0;
1073
1074 val = read_sanitised_ftr_reg(id);
1075
1076 switch (id) {
1077 case SYS_ID_AA64PFR0_EL1:
1078 if (!vcpu_has_sve(vcpu))
1079 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
1080 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
1081 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
1082 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
1083 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
1084 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
1085 if (kvm_vgic_global_state.type == VGIC_V3) {
1086 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
1087 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
1088 }
1089 break;
1090 case SYS_ID_AA64PFR1_EL1:
1091 if (!kvm_has_mte(vcpu->kvm))
1092 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
1093
1094 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
1095 break;
1096 case SYS_ID_AA64ISAR1_EL1:
1097 if (!vcpu_has_ptrauth(vcpu))
1098 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
1099 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
1100 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
1101 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
1102 break;
1103 case SYS_ID_AA64ISAR2_EL1:
1104 if (!vcpu_has_ptrauth(vcpu))
1105 val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
1106 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
1107 if (!cpus_have_final_cap(ARM64_HAS_WFXT))
1108 val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
1109 break;
1110 case SYS_ID_AA64DFR0_EL1:
1111 /* Limit debug to ARMv8.0 */
1112 val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
1113 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
1114 /* Limit guests to PMUv3 for ARMv8.4 */
1115 val = cpuid_feature_cap_perfmon_field(val,
1116 ID_AA64DFR0_EL1_PMUVer_SHIFT,
1117 kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
1118 /* Hide SPE from guests */
1119 val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
1120 break;
1121 case SYS_ID_DFR0_EL1:
1122 /* Limit guests to PMUv3 for ARMv8.4 */
1123 val = cpuid_feature_cap_perfmon_field(val,
1124 ID_DFR0_PERFMON_SHIFT,
1125 kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
1126 break;
1127 }
1128
1129 return val;
1130 }
1131
1132 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1133 const struct sys_reg_desc *r)
1134 {
1135 u32 id = reg_to_encoding(r);
1136
1137 switch (id) {
1138 case SYS_ID_AA64ZFR0_EL1:
1139 if (!vcpu_has_sve(vcpu))
1140 return REG_RAZ;
1141 break;
1142 }
1143
1144 return 0;
1145 }
1146
1147 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1148 const struct sys_reg_desc *r)
1149 {
1150 /*
1151 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1152 * EL. Promote to RAZ/WI in order to guarantee consistency between
1153 * systems.
1154 */
1155 if (!kvm_supports_32bit_el0())
1156 return REG_RAZ | REG_USER_WI;
1157
1158 return id_visibility(vcpu, r);
1159 }
1160
1161 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1162 const struct sys_reg_desc *r)
1163 {
1164 return REG_RAZ;
1165 }
1166
1167 /* cpufeature ID register access trap handlers */
1168
1169 static bool access_id_reg(struct kvm_vcpu *vcpu,
1170 struct sys_reg_params *p,
1171 const struct sys_reg_desc *r)
1172 {
1173 if (p->is_write)
1174 return write_to_read_only(vcpu, p, r);
1175
1176 p->regval = read_id_reg(vcpu, r);
1177 return true;
1178 }
1179
1180 /* Visibility overrides for SVE-specific control registers */
1181 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1182 const struct sys_reg_desc *rd)
1183 {
1184 if (vcpu_has_sve(vcpu))
1185 return 0;
1186
1187 return REG_HIDDEN;
1188 }
1189
1190 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
1191 const struct sys_reg_desc *rd,
1192 u64 val)
1193 {
1194 u8 csv2, csv3;
1195
1196 /*
1197 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
1198 * it doesn't promise more than what is actually provided (the
1199 * guest could otherwise be covered in ectoplasmic residue).
1200 */
1201 csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
1202 if (csv2 > 1 ||
1203 (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
1204 return -EINVAL;
1205
1206 /* Same thing for CSV3 */
1207 csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
1208 if (csv3 > 1 ||
1209 (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
1210 return -EINVAL;
1211
1212 /* We can only differ with CSV[23], and anything else is an error */
1213 val ^= read_id_reg(vcpu, rd);
1214 val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
1215 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
1216 if (val)
1217 return -EINVAL;
1218
1219 vcpu->kvm->arch.pfr0_csv2 = csv2;
1220 vcpu->kvm->arch.pfr0_csv3 = csv3;
1221
1222 return 0;
1223 }
1224
1225 /*
1226 * cpufeature ID register user accessors
1227 *
1228 * For now, these registers are immutable for userspace, so no values
1229 * are stored, and for set_id_reg() we don't allow the effective value
1230 * to be changed.
1231 */
1232 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1233 u64 *val)
1234 {
1235 *val = read_id_reg(vcpu, rd);
1236 return 0;
1237 }
1238
1239 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1240 u64 val)
1241 {
1242 /* This is what we mean by invariant: you can't change it. */
1243 if (val != read_id_reg(vcpu, rd))
1244 return -EINVAL;
1245
1246 return 0;
1247 }
1248
1249 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1250 u64 *val)
1251 {
1252 *val = 0;
1253 return 0;
1254 }
1255
1256 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1257 u64 val)
1258 {
1259 return 0;
1260 }
1261
1262 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1263 const struct sys_reg_desc *r)
1264 {
1265 if (p->is_write)
1266 return write_to_read_only(vcpu, p, r);
1267
1268 p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1269 return true;
1270 }
1271
1272 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1273 const struct sys_reg_desc *r)
1274 {
1275 if (p->is_write)
1276 return write_to_read_only(vcpu, p, r);
1277
1278 p->regval = read_sysreg(clidr_el1);
1279 return true;
1280 }
1281
1282 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1283 const struct sys_reg_desc *r)
1284 {
1285 int reg = r->reg;
1286
1287 if (p->is_write)
1288 vcpu_write_sys_reg(vcpu, p->regval, reg);
1289 else
1290 p->regval = vcpu_read_sys_reg(vcpu, reg);
1291 return true;
1292 }
1293
1294 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1295 const struct sys_reg_desc *r)
1296 {
1297 u32 csselr;
1298
1299 if (p->is_write)
1300 return write_to_read_only(vcpu, p, r);
1301
1302 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
1303 p->regval = get_ccsidr(csselr);
1304
1305 /*
1306 * Guests should not be doing cache operations by set/way at all, and
1307 * for this reason, we trap them and attempt to infer the intent, so
1308 * that we can flush the entire guest's address space at the appropriate
1309 * time.
1310 * To prevent this trapping from causing performance problems, let's
1311 * expose the geometry of all data and unified caches (which are
1312 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
1313 * [If guests should attempt to infer aliasing properties from the
1314 * geometry (which is not permitted by the architecture), they would
1315 * only do so for virtually indexed caches.]
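* Clearing CCSIDR_EL1[27:3] zeroes NumSets and Associativity, which encode
* "count minus one", hence the 1 set and 1 way reported to the guest.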
1316 */
1317 if (!(csselr & 1)) // data or unified cache
1318 p->regval &= ~GENMASK(27, 3);
1319 return true;
1320 }
1321
1322 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
1323 const struct sys_reg_desc *rd)
1324 {
1325 if (kvm_has_mte(vcpu->kvm))
1326 return 0;
1327
1328 return REG_HIDDEN;
1329 }
1330
1331 #define MTE_REG(name) { \
1332 SYS_DESC(SYS_##name), \
1333 .access = undef_access, \
1334 .reset = reset_unknown, \
1335 .reg = name, \
1336 .visibility = mte_visibility, \
1337 }
1338
1339 /* sys_reg_desc initialiser for known cpufeature ID registers */
1340 #define ID_SANITISED(name) { \
1341 SYS_DESC(SYS_##name), \
1342 .access = access_id_reg, \
1343 .get_user = get_id_reg, \
1344 .set_user = set_id_reg, \
1345 .visibility = id_visibility, \
1346 }
1347
1348 /* sys_reg_desc initialiser for known cpufeature ID registers */
1349 #define AA32_ID_SANITISED(name) { \
1350 SYS_DESC(SYS_##name), \
1351 .access = access_id_reg, \
1352 .get_user = get_id_reg, \
1353 .set_user = set_id_reg, \
1354 .visibility = aa32_id_visibility, \
1355 }
1356
1357 /*
1358 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
1359 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
1360 * (1 <= crm < 8, 0 <= Op2 < 8).
1361 */
1362 #define ID_UNALLOCATED(crm, op2) { \
1363 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
1364 .access = access_id_reg, \
1365 .get_user = get_id_reg, \
1366 .set_user = set_id_reg, \
1367 .visibility = raz_visibility \
1368 }
1369
1370 /*
1371 * sys_reg_desc initialiser for known ID registers that we hide from guests.
1372 * For now, these are exposed just like unallocated ID regs: they appear
1373 * RAZ for the guest.
1374 */
1375 #define ID_HIDDEN(name) { \
1376 SYS_DESC(SYS_##name), \
1377 .access = access_id_reg, \
1378 .get_user = get_id_reg, \
1379 .set_user = set_id_reg, \
1380 .visibility = raz_visibility, \
1381 }
1382
1383 /*
1384 * Architected system registers.
1385 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
1386 *
1387 * Debug handling: We do trap most, if not all debug related system
1388 * registers. The implementation is good enough to ensure that a guest
1389 * can use these with minimal performance degradation. The drawback is
1390 * that we don't implement any of the external debug architecture.
1391 * This should be revisited if we ever encounter a more demanding
1392 * guest...
1393 */
1394 static const struct sys_reg_desc sys_reg_descs[] = {
1395 { SYS_DESC(SYS_DC_ISW), access_dcsw },
1396 { SYS_DESC(SYS_DC_CSW), access_dcsw },
1397 { SYS_DESC(SYS_DC_CISW), access_dcsw },
1398
1399 DBG_BCR_BVR_WCR_WVR_EL1(0),
1400 DBG_BCR_BVR_WCR_WVR_EL1(1),
1401 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1402 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1403 DBG_BCR_BVR_WCR_WVR_EL1(2),
1404 DBG_BCR_BVR_WCR_WVR_EL1(3),
1405 DBG_BCR_BVR_WCR_WVR_EL1(4),
1406 DBG_BCR_BVR_WCR_WVR_EL1(5),
1407 DBG_BCR_BVR_WCR_WVR_EL1(6),
1408 DBG_BCR_BVR_WCR_WVR_EL1(7),
1409 DBG_BCR_BVR_WCR_WVR_EL1(8),
1410 DBG_BCR_BVR_WCR_WVR_EL1(9),
1411 DBG_BCR_BVR_WCR_WVR_EL1(10),
1412 DBG_BCR_BVR_WCR_WVR_EL1(11),
1413 DBG_BCR_BVR_WCR_WVR_EL1(12),
1414 DBG_BCR_BVR_WCR_WVR_EL1(13),
1415 DBG_BCR_BVR_WCR_WVR_EL1(14),
1416 DBG_BCR_BVR_WCR_WVR_EL1(15),
1417
1418 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1419 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
1420 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
1421 SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
1422 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1423 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1424 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1425 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1426 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1427
1428 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1429 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1430 // DBGDTR[TR]X_EL0 share the same encoding
1431 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1432
1433 { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1434
1435 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1436
1437 /*
1438 * ID regs: all ID_SANITISED() entries here must have corresponding
1439 * entries in arm64_ftr_regs[].
1440 */
1441
1442 /* AArch64 mappings of the AArch32 ID registers */
1443 /* CRm=1 */
1444 AA32_ID_SANITISED(ID_PFR0_EL1),
1445 AA32_ID_SANITISED(ID_PFR1_EL1),
1446 AA32_ID_SANITISED(ID_DFR0_EL1),
1447 ID_HIDDEN(ID_AFR0_EL1),
1448 AA32_ID_SANITISED(ID_MMFR0_EL1),
1449 AA32_ID_SANITISED(ID_MMFR1_EL1),
1450 AA32_ID_SANITISED(ID_MMFR2_EL1),
1451 AA32_ID_SANITISED(ID_MMFR3_EL1),
1452
1453 /* CRm=2 */
1454 AA32_ID_SANITISED(ID_ISAR0_EL1),
1455 AA32_ID_SANITISED(ID_ISAR1_EL1),
1456 AA32_ID_SANITISED(ID_ISAR2_EL1),
1457 AA32_ID_SANITISED(ID_ISAR3_EL1),
1458 AA32_ID_SANITISED(ID_ISAR4_EL1),
1459 AA32_ID_SANITISED(ID_ISAR5_EL1),
1460 AA32_ID_SANITISED(ID_MMFR4_EL1),
1461 AA32_ID_SANITISED(ID_ISAR6_EL1),
1462
1463 /* CRm=3 */
1464 AA32_ID_SANITISED(MVFR0_EL1),
1465 AA32_ID_SANITISED(MVFR1_EL1),
1466 AA32_ID_SANITISED(MVFR2_EL1),
1467 ID_UNALLOCATED(3,3),
1468 AA32_ID_SANITISED(ID_PFR2_EL1),
1469 ID_HIDDEN(ID_DFR1_EL1),
1470 AA32_ID_SANITISED(ID_MMFR5_EL1),
1471 ID_UNALLOCATED(3,7),
1472
1473 /* AArch64 ID registers */
1474 /* CRm=4 */
1475 { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
1476 .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
1477 ID_SANITISED(ID_AA64PFR1_EL1),
1478 ID_UNALLOCATED(4,2),
1479 ID_UNALLOCATED(4,3),
1480 ID_SANITISED(ID_AA64ZFR0_EL1),
1481 ID_HIDDEN(ID_AA64SMFR0_EL1),
1482 ID_UNALLOCATED(4,6),
1483 ID_UNALLOCATED(4,7),
1484
1485 /* CRm=5 */
1486 ID_SANITISED(ID_AA64DFR0_EL1),
1487 ID_SANITISED(ID_AA64DFR1_EL1),
1488 ID_UNALLOCATED(5,2),
1489 ID_UNALLOCATED(5,3),
1490 ID_HIDDEN(ID_AA64AFR0_EL1),
1491 ID_HIDDEN(ID_AA64AFR1_EL1),
1492 ID_UNALLOCATED(5,6),
1493 ID_UNALLOCATED(5,7),
1494
1495 /* CRm=6 */
1496 ID_SANITISED(ID_AA64ISAR0_EL1),
1497 ID_SANITISED(ID_AA64ISAR1_EL1),
1498 ID_SANITISED(ID_AA64ISAR2_EL1),
1499 ID_UNALLOCATED(6,3),
1500 ID_UNALLOCATED(6,4),
1501 ID_UNALLOCATED(6,5),
1502 ID_UNALLOCATED(6,6),
1503 ID_UNALLOCATED(6,7),
1504
1505 /* CRm=7 */
1506 ID_SANITISED(ID_AA64MMFR0_EL1),
1507 ID_SANITISED(ID_AA64MMFR1_EL1),
1508 ID_SANITISED(ID_AA64MMFR2_EL1),
1509 ID_UNALLOCATED(7,3),
1510 ID_UNALLOCATED(7,4),
1511 ID_UNALLOCATED(7,5),
1512 ID_UNALLOCATED(7,6),
1513 ID_UNALLOCATED(7,7),
1514
1515 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1516 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
1517 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1518
1519 MTE_REG(RGSR_EL1),
1520 MTE_REG(GCR_EL1),
1521
1522 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
1523 { SYS_DESC(SYS_TRFCR_EL1), undef_access },
1524 { SYS_DESC(SYS_SMPRI_EL1), undef_access },
1525 { SYS_DESC(SYS_SMCR_EL1), undef_access },
1526 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1527 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1528 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1529
1530 PTRAUTH_KEY(APIA),
1531 PTRAUTH_KEY(APIB),
1532 PTRAUTH_KEY(APDA),
1533 PTRAUTH_KEY(APDB),
1534 PTRAUTH_KEY(APGA),
1535
1536 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1537 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1538 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1539
1540 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1541 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1542 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1543 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1544 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1545 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1546 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1547 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1548
1549 MTE_REG(TFSR_EL1),
1550 MTE_REG(TFSRE0_EL1),
1551
1552 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1553 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1554
1555 { SYS_DESC(SYS_PMSCR_EL1), undef_access },
1556 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
1557 { SYS_DESC(SYS_PMSICR_EL1), undef_access },
1558 { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
1559 { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
1560 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
1561 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
1562 { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
1563 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
1564 { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
1565 { SYS_DESC(SYS_PMBSR_EL1), undef_access },
1566 /* PMBIDR_EL1 is not trapped */
1567
1568 { PMU_SYS_REG(SYS_PMINTENSET_EL1),
1569 .access = access_pminten, .reg = PMINTENSET_EL1 },
1570 { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
1571 .access = access_pminten, .reg = PMINTENSET_EL1 },
1572 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
1573
1574 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1575 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1576
1577 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1578 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1579 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1580 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1581 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1582
1583 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1584 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1585
1586 { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1587 { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1588 { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1589 { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1590 { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1591 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1592 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1593 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1594 { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1595 { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1596 { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1597 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1598
1599 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1600 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1601
1602 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
1603
1604 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1605
1606 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
1607 { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
1608 { SYS_DESC(SYS_SMIDR_EL1), undef_access },
1609 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1610 { SYS_DESC(SYS_CTR_EL0), access_ctr },
1611 { SYS_DESC(SYS_SVCR), undef_access },
1612
1613 { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
1614 .reset = reset_pmcr, .reg = PMCR_EL0 },
1615 { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
1616 .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
1617 { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
1618 .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
1619 { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
1620 .access = access_pmovs, .reg = PMOVSSET_EL0 },
1621 /*
1622 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
1623 * previously (and pointlessly) advertised in the past...
1624 */
1625 { PMU_SYS_REG(SYS_PMSWINC_EL0),
1626 .get_user = get_raz_reg, .set_user = set_wi_reg,
1627 .access = access_pmswinc, .reset = NULL },
1628 { PMU_SYS_REG(SYS_PMSELR_EL0),
1629 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
1630 { PMU_SYS_REG(SYS_PMCEID0_EL0),
1631 .access = access_pmceid, .reset = NULL },
1632 { PMU_SYS_REG(SYS_PMCEID1_EL0),
1633 .access = access_pmceid, .reset = NULL },
1634 { PMU_SYS_REG(SYS_PMCCNTR_EL0),
1635 .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
1636 { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
1637 .access = access_pmu_evtyper, .reset = NULL },
1638 { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
1639 .access = access_pmu_evcntr, .reset = NULL },
1640 /*
1641 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
1642 * in 32bit mode. Here we choose to reset it as zero for consistency.
1643 */
1644 { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
1645 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
1646 { PMU_SYS_REG(SYS_PMOVSSET_EL0),
1647 .access = access_pmovs, .reg = PMOVSSET_EL0 },
1648
1649 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1650 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1651 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
1652
1653 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
1654
1655 { SYS_DESC(SYS_AMCR_EL0), undef_access },
1656 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
1657 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
1658 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
1659 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
1660 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
1661 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
1662 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
1663 AMU_AMEVCNTR0_EL0(0),
1664 AMU_AMEVCNTR0_EL0(1),
1665 AMU_AMEVCNTR0_EL0(2),
1666 AMU_AMEVCNTR0_EL0(3),
1667 AMU_AMEVCNTR0_EL0(4),
1668 AMU_AMEVCNTR0_EL0(5),
1669 AMU_AMEVCNTR0_EL0(6),
1670 AMU_AMEVCNTR0_EL0(7),
1671 AMU_AMEVCNTR0_EL0(8),
1672 AMU_AMEVCNTR0_EL0(9),
1673 AMU_AMEVCNTR0_EL0(10),
1674 AMU_AMEVCNTR0_EL0(11),
1675 AMU_AMEVCNTR0_EL0(12),
1676 AMU_AMEVCNTR0_EL0(13),
1677 AMU_AMEVCNTR0_EL0(14),
1678 AMU_AMEVCNTR0_EL0(15),
1679 AMU_AMEVTYPER0_EL0(0),
1680 AMU_AMEVTYPER0_EL0(1),
1681 AMU_AMEVTYPER0_EL0(2),
1682 AMU_AMEVTYPER0_EL0(3),
1683 AMU_AMEVTYPER0_EL0(4),
1684 AMU_AMEVTYPER0_EL0(5),
1685 AMU_AMEVTYPER0_EL0(6),
1686 AMU_AMEVTYPER0_EL0(7),
1687 AMU_AMEVTYPER0_EL0(8),
1688 AMU_AMEVTYPER0_EL0(9),
1689 AMU_AMEVTYPER0_EL0(10),
1690 AMU_AMEVTYPER0_EL0(11),
1691 AMU_AMEVTYPER0_EL0(12),
1692 AMU_AMEVTYPER0_EL0(13),
1693 AMU_AMEVTYPER0_EL0(14),
1694 AMU_AMEVTYPER0_EL0(15),
1695 AMU_AMEVCNTR1_EL0(0),
1696 AMU_AMEVCNTR1_EL0(1),
1697 AMU_AMEVCNTR1_EL0(2),
1698 AMU_AMEVCNTR1_EL0(3),
1699 AMU_AMEVCNTR1_EL0(4),
1700 AMU_AMEVCNTR1_EL0(5),
1701 AMU_AMEVCNTR1_EL0(6),
1702 AMU_AMEVCNTR1_EL0(7),
1703 AMU_AMEVCNTR1_EL0(8),
1704 AMU_AMEVCNTR1_EL0(9),
1705 AMU_AMEVCNTR1_EL0(10),
1706 AMU_AMEVCNTR1_EL0(11),
1707 AMU_AMEVCNTR1_EL0(12),
1708 AMU_AMEVCNTR1_EL0(13),
1709 AMU_AMEVCNTR1_EL0(14),
1710 AMU_AMEVCNTR1_EL0(15),
1711 AMU_AMEVTYPER1_EL0(0),
1712 AMU_AMEVTYPER1_EL0(1),
1713 AMU_AMEVTYPER1_EL0(2),
1714 AMU_AMEVTYPER1_EL0(3),
1715 AMU_AMEVTYPER1_EL0(4),
1716 AMU_AMEVTYPER1_EL0(5),
1717 AMU_AMEVTYPER1_EL0(6),
1718 AMU_AMEVTYPER1_EL0(7),
1719 AMU_AMEVTYPER1_EL0(8),
1720 AMU_AMEVTYPER1_EL0(9),
1721 AMU_AMEVTYPER1_EL0(10),
1722 AMU_AMEVTYPER1_EL0(11),
1723 AMU_AMEVTYPER1_EL0(12),
1724 AMU_AMEVTYPER1_EL0(13),
1725 AMU_AMEVTYPER1_EL0(14),
1726 AMU_AMEVTYPER1_EL0(15),
1727
1728 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
1729 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
1730 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
1731
1732 /* PMEVCNTRn_EL0 */
1733 PMU_PMEVCNTR_EL0(0),
1734 PMU_PMEVCNTR_EL0(1),
1735 PMU_PMEVCNTR_EL0(2),
1736 PMU_PMEVCNTR_EL0(3),
1737 PMU_PMEVCNTR_EL0(4),
1738 PMU_PMEVCNTR_EL0(5),
1739 PMU_PMEVCNTR_EL0(6),
1740 PMU_PMEVCNTR_EL0(7),
1741 PMU_PMEVCNTR_EL0(8),
1742 PMU_PMEVCNTR_EL0(9),
1743 PMU_PMEVCNTR_EL0(10),
1744 PMU_PMEVCNTR_EL0(11),
1745 PMU_PMEVCNTR_EL0(12),
1746 PMU_PMEVCNTR_EL0(13),
1747 PMU_PMEVCNTR_EL0(14),
1748 PMU_PMEVCNTR_EL0(15),
1749 PMU_PMEVCNTR_EL0(16),
1750 PMU_PMEVCNTR_EL0(17),
1751 PMU_PMEVCNTR_EL0(18),
1752 PMU_PMEVCNTR_EL0(19),
1753 PMU_PMEVCNTR_EL0(20),
1754 PMU_PMEVCNTR_EL0(21),
1755 PMU_PMEVCNTR_EL0(22),
1756 PMU_PMEVCNTR_EL0(23),
1757 PMU_PMEVCNTR_EL0(24),
1758 PMU_PMEVCNTR_EL0(25),
1759 PMU_PMEVCNTR_EL0(26),
1760 PMU_PMEVCNTR_EL0(27),
1761 PMU_PMEVCNTR_EL0(28),
1762 PMU_PMEVCNTR_EL0(29),
1763 PMU_PMEVCNTR_EL0(30),
1764 /* PMEVTYPERn_EL0 */
1765 PMU_PMEVTYPER_EL0(0),
1766 PMU_PMEVTYPER_EL0(1),
1767 PMU_PMEVTYPER_EL0(2),
1768 PMU_PMEVTYPER_EL0(3),
1769 PMU_PMEVTYPER_EL0(4),
1770 PMU_PMEVTYPER_EL0(5),
1771 PMU_PMEVTYPER_EL0(6),
1772 PMU_PMEVTYPER_EL0(7),
1773 PMU_PMEVTYPER_EL0(8),
1774 PMU_PMEVTYPER_EL0(9),
1775 PMU_PMEVTYPER_EL0(10),
1776 PMU_PMEVTYPER_EL0(11),
1777 PMU_PMEVTYPER_EL0(12),
1778 PMU_PMEVTYPER_EL0(13),
1779 PMU_PMEVTYPER_EL0(14),
1780 PMU_PMEVTYPER_EL0(15),
1781 PMU_PMEVTYPER_EL0(16),
1782 PMU_PMEVTYPER_EL0(17),
1783 PMU_PMEVTYPER_EL0(18),
1784 PMU_PMEVTYPER_EL0(19),
1785 PMU_PMEVTYPER_EL0(20),
1786 PMU_PMEVTYPER_EL0(21),
1787 PMU_PMEVTYPER_EL0(22),
1788 PMU_PMEVTYPER_EL0(23),
1789 PMU_PMEVTYPER_EL0(24),
1790 PMU_PMEVTYPER_EL0(25),
1791 PMU_PMEVTYPER_EL0(26),
1792 PMU_PMEVTYPER_EL0(27),
1793 PMU_PMEVTYPER_EL0(28),
1794 PMU_PMEVTYPER_EL0(29),
1795 PMU_PMEVTYPER_EL0(30),
1796 /*
1797 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1798 * in 32bit mode. Here we choose to reset it as zero for consistency.
1799 */
1800 { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
1801 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
1802
1803 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1804 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1805 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1806 };
1807
1808 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
1809 struct sys_reg_params *p,
1810 const struct sys_reg_desc *r)
1811 {
1812 if (p->is_write) {
1813 return ignore_write(vcpu, p);
1814 } else {
1815 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1816 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1817 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
1818
1819 p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
1820 (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
1821 (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
1822 | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
1823 return true;
1824 }
1825 }
1826
1827 /*
1828 * AArch32 debug register mappings
1829 *
1830 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1831 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1832 *
1833 * None of the other registers share their location, so treat them as
1834 * if they were 64bit.
1835 */
1836 #define DBG_BCR_BVR_WCR_WVR(n) \
1837 /* DBGBVRn */ \
1838 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
1839 /* DBGBCRn */ \
1840 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
1841 /* DBGWVRn */ \
1842 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
1843 /* DBGWCRn */ \
1844 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1845
1846 #define DBGBXVR(n) \
1847 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
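/*
 * Illustrative expansion (a sketch of what the preprocessor emits, not
 * extra table entries): DBG_BCR_BVR_WCR_WVR(0) becomes
 *
 *	{ AA32(LO), Op1( 0), CRn( 0), CRm( 0), Op2( 4), trap_bvr, NULL, 0 },
 *	{ Op1( 0), CRn( 0), CRm( 0), Op2( 5), trap_bcr, NULL, 0 },
 *	{ Op1( 0), CRn( 0), CRm( 0), Op2( 6), trap_wvr, NULL, 0 },
 *	{ Op1( 0), CRn( 0), CRm( 0), Op2( 7), trap_wcr, NULL, 0 }
 *
 * while DBGBXVR(0) adds the CRn( 1) alias that lands in
 * DBGBVR0_EL1[63:32] thanks to the AA32(HI) annotation.
 */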
1848
1849 /*
1850 * Trapped cp14 registers. We generally ignore most of the external
1851 * debug, on the principle that they don't really make sense to a
1852 * guest. Revisit this one day, should this principle ever change.
1853 */
1854 static const struct sys_reg_desc cp14_regs[] = {
1855 /* DBGDIDR */
1856 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
1857 /* DBGDTRRXext */
1858 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1859
1860 DBG_BCR_BVR_WCR_WVR(0),
1861 /* DBGDSCRint */
1862 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1863 DBG_BCR_BVR_WCR_WVR(1),
1864 /* DBGDCCINT */
1865 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
1866 /* DBGDSCRext */
1867 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
1868 DBG_BCR_BVR_WCR_WVR(2),
1869 /* DBGDTR[RT]Xint */
1870 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1871 /* DBGDTR[RT]Xext */
1872 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1873 DBG_BCR_BVR_WCR_WVR(3),
1874 DBG_BCR_BVR_WCR_WVR(4),
1875 DBG_BCR_BVR_WCR_WVR(5),
1876 /* DBGWFAR */
1877 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1878 /* DBGOSECCR */
1879 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1880 DBG_BCR_BVR_WCR_WVR(6),
1881 /* DBGVCR */
1882 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
1883 DBG_BCR_BVR_WCR_WVR(7),
1884 DBG_BCR_BVR_WCR_WVR(8),
1885 DBG_BCR_BVR_WCR_WVR(9),
1886 DBG_BCR_BVR_WCR_WVR(10),
1887 DBG_BCR_BVR_WCR_WVR(11),
1888 DBG_BCR_BVR_WCR_WVR(12),
1889 DBG_BCR_BVR_WCR_WVR(13),
1890 DBG_BCR_BVR_WCR_WVR(14),
1891 DBG_BCR_BVR_WCR_WVR(15),
1892
1893 /* DBGDRAR (32bit) */
1894 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1895
1896 DBGBXVR(0),
1897 /* DBGOSLAR */
1898 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
1899 DBGBXVR(1),
1900 /* DBGOSLSR */
1901 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
1902 DBGBXVR(2),
1903 DBGBXVR(3),
1904 /* DBGOSDLR */
1905 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1906 DBGBXVR(4),
1907 /* DBGPRCR */
1908 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1909 DBGBXVR(5),
1910 DBGBXVR(6),
1911 DBGBXVR(7),
1912 DBGBXVR(8),
1913 DBGBXVR(9),
1914 DBGBXVR(10),
1915 DBGBXVR(11),
1916 DBGBXVR(12),
1917 DBGBXVR(13),
1918 DBGBXVR(14),
1919 DBGBXVR(15),
1920
1921 /* DBGDSAR (32bit) */
1922 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1923
1924 /* DBGDEVID2 */
1925 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1926 /* DBGDEVID1 */
1927 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1928 /* DBGDEVID */
1929 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1930 /* DBGCLAIMSET */
1931 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1932 /* DBGCLAIMCLR */
1933 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1934 /* DBGAUTHSTATUS */
1935 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1936 };
1937
1938 /* Trapped cp14 64bit registers */
1939 static const struct sys_reg_desc cp14_64_regs[] = {
1940 /* DBGDRAR (64bit) */
1941 { Op1( 0), CRm( 1), .access = trap_raz_wi },
1942
1943 /* DBGDSAR (64bit) */
1944 { Op1( 0), CRm( 2), .access = trap_raz_wi },
1945 };
1946
1947 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
1948 AA32(_map), \
1949 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
1950 .visibility = pmu_visibility
1951
1952 /* Macro to expand the PMEVCNTRn register */
1953 #define PMU_PMEVCNTR(n) \
1954 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
1955 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
1956 .access = access_pmu_evcntr }
1957
1958 /* Macro to expand the PMEVTYPERn register */
1959 #define PMU_PMEVTYPER(n) \
1960 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
1961 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
1962 .access = access_pmu_evtyper }
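/*
 * Encoding sketch (illustrative): for n = 13, ((n) >> 3) & 0x3 is 1 and
 * (n) & 0x7 is 5, so PMU_PMEVCNTR(13) describes c14, CRm = 0b1001 (c9),
 * Op2 = 5, i.e. PMEVCNTR13, while PMU_PMEVTYPER(13) picks the matching
 * PMEVTYPER13 at CRm = 0b1101 (c13).
 */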
1963 /*
1964 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1965 * depending on the way they are accessed (as a 32bit or a 64bit
1966 * register).
1967 */
1968 static const struct sys_reg_desc cp15_regs[] = {
1969 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
1970 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
1971 /* ACTLR */
1972 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
1973 /* ACTLR2 */
1974 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
1975 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
1976 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
1977 /* TTBCR */
1978 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
1979 /* TTBCR2 */
1980 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
1981 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
1982 /* DFSR */
1983 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
1984 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
1985 /* ADFSR */
1986 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
1987 /* AIFSR */
1988 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
1989 /* DFAR */
1990 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
1991 /* IFAR */
1992 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
1993
1994 /*
1995 * DC{C,I,CI}SW operations:
1996 */
1997 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1998 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1999 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
2000
2001 /* PMU */
2002 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2003 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2004 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2005 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2006 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2007 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2008 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
2009 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
2010 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2011 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2012 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2013 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2014 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2015 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2016 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2017 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
2018 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
2019 /* PMMIR */
2020 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
2021
2022 /* PRRR/MAIR0 */
2023 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2024 /* NMRR/MAIR1 */
2025 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2026 /* AMAIR0 */
2027 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2028 /* AMAIR1 */
2029 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
2030
2031 /* ICC_SRE */
2032 { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2033
2034 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
2035
2036 /* Arch Timers */
2037 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2038 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2039
2040 /* PMEVCNTRn */
2041 PMU_PMEVCNTR(0),
2042 PMU_PMEVCNTR(1),
2043 PMU_PMEVCNTR(2),
2044 PMU_PMEVCNTR(3),
2045 PMU_PMEVCNTR(4),
2046 PMU_PMEVCNTR(5),
2047 PMU_PMEVCNTR(6),
2048 PMU_PMEVCNTR(7),
2049 PMU_PMEVCNTR(8),
2050 PMU_PMEVCNTR(9),
2051 PMU_PMEVCNTR(10),
2052 PMU_PMEVCNTR(11),
2053 PMU_PMEVCNTR(12),
2054 PMU_PMEVCNTR(13),
2055 PMU_PMEVCNTR(14),
2056 PMU_PMEVCNTR(15),
2057 PMU_PMEVCNTR(16),
2058 PMU_PMEVCNTR(17),
2059 PMU_PMEVCNTR(18),
2060 PMU_PMEVCNTR(19),
2061 PMU_PMEVCNTR(20),
2062 PMU_PMEVCNTR(21),
2063 PMU_PMEVCNTR(22),
2064 PMU_PMEVCNTR(23),
2065 PMU_PMEVCNTR(24),
2066 PMU_PMEVCNTR(25),
2067 PMU_PMEVCNTR(26),
2068 PMU_PMEVCNTR(27),
2069 PMU_PMEVCNTR(28),
2070 PMU_PMEVCNTR(29),
2071 PMU_PMEVCNTR(30),
2072 /* PMEVTYPERn */
2073 PMU_PMEVTYPER(0),
2074 PMU_PMEVTYPER(1),
2075 PMU_PMEVTYPER(2),
2076 PMU_PMEVTYPER(3),
2077 PMU_PMEVTYPER(4),
2078 PMU_PMEVTYPER(5),
2079 PMU_PMEVTYPER(6),
2080 PMU_PMEVTYPER(7),
2081 PMU_PMEVTYPER(8),
2082 PMU_PMEVTYPER(9),
2083 PMU_PMEVTYPER(10),
2084 PMU_PMEVTYPER(11),
2085 PMU_PMEVTYPER(12),
2086 PMU_PMEVTYPER(13),
2087 PMU_PMEVTYPER(14),
2088 PMU_PMEVTYPER(15),
2089 PMU_PMEVTYPER(16),
2090 PMU_PMEVTYPER(17),
2091 PMU_PMEVTYPER(18),
2092 PMU_PMEVTYPER(19),
2093 PMU_PMEVTYPER(20),
2094 PMU_PMEVTYPER(21),
2095 PMU_PMEVTYPER(22),
2096 PMU_PMEVTYPER(23),
2097 PMU_PMEVTYPER(24),
2098 PMU_PMEVTYPER(25),
2099 PMU_PMEVTYPER(26),
2100 PMU_PMEVTYPER(27),
2101 PMU_PMEVTYPER(28),
2102 PMU_PMEVTYPER(29),
2103 PMU_PMEVTYPER(30),
2104 /* PMCCFILTR */
2105 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
2106
2107 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2108 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2109 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
2110 };
2111
2112 static const struct sys_reg_desc cp15_64_regs[] = {
2113 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2114 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
2115 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2116 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
2117 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
2118 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
2119 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
2120 };
2121
2122 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2123 bool is_32)
2124 {
2125 unsigned int i;
2126
2127 for (i = 0; i < n; i++) {
2128 if (!is_32 && table[i].reg && !table[i].reset) {
2129 kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
2130 return false;
2131 }
2132
2133 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2134 kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
2135 return false;
2136 }
2137 }
2138
2139 return true;
2140 }
2141
2142 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
2143 {
2144 kvm_inject_undefined(vcpu);
2145 return 1;
2146 }
2147
2148 static void perform_access(struct kvm_vcpu *vcpu,
2149 struct sys_reg_params *params,
2150 const struct sys_reg_desc *r)
2151 {
2152 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2153
2154 /* Check for regs disabled by runtime config */
2155 if (sysreg_hidden(vcpu, r)) {
2156 kvm_inject_undefined(vcpu);
2157 return;
2158 }
2159
2160 /*
2161 * Not having an accessor means that we have configured a trap
2162 * that we don't know how to handle. This certainly qualifies
2163 * as a gross bug that should be fixed right away.
2164 */
2165 BUG_ON(!r->access);
2166
2167 /* Skip instruction if instructed so */
2168 if (likely(r->access(vcpu, params, r)))
2169 kvm_incr_pc(vcpu);
2170 }
2171
2172 /*
2173 * emulate_cp -- tries to match a sys_reg access in a handling table, and
2174 * call the corresponding trap handler.
2175 *
2176 * @params: pointer to the descriptor of the access
2177 * @table: array of trap descriptors
2178 * @num: size of the trap descriptor array
2179 *
2180 * Return true if the access has been handled, false if not.
2181 */
2182 static bool emulate_cp(struct kvm_vcpu *vcpu,
2183 struct sys_reg_params *params,
2184 const struct sys_reg_desc *table,
2185 size_t num)
2186 {
2187 const struct sys_reg_desc *r;
2188
2189 if (!table)
2190 return false; /* Not handled */
2191
2192 r = find_reg(params, table, num);
2193
2194 if (r) {
2195 perform_access(vcpu, params, r);
2196 return true;
2197 }
2198
2199 /* Not handled */
2200 return false;
2201 }
2202
2203 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2204 struct sys_reg_params *params)
2205 {
2206 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
2207 int cp = -1;
2208
2209 switch (esr_ec) {
2210 case ESR_ELx_EC_CP15_32:
2211 case ESR_ELx_EC_CP15_64:
2212 cp = 15;
2213 break;
2214 case ESR_ELx_EC_CP14_MR:
2215 case ESR_ELx_EC_CP14_64:
2216 cp = 14;
2217 break;
2218 default:
2219 WARN_ON(1);
2220 }
2221
2222 print_sys_reg_msg(params,
2223 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2224 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2225 kvm_inject_undefined(vcpu);
2226 }
2227
2228 /**
2229 * kvm_handle_cp_64 -- handles an MCRR/MRRC trap on a guest CP14/CP15 access
2230 * @vcpu: The VCPU pointer
2231 * @global: The 64-bit coprocessor register table to search
 * @nr_global: The number of entries in @global
2232 */
2233 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2234 const struct sys_reg_desc *global,
2235 size_t nr_global)
2236 {
2237 struct sys_reg_params params;
2238 u64 esr = kvm_vcpu_get_esr(vcpu);
2239 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2240 int Rt2 = (esr >> 10) & 0x1f;
2241
2242 params.CRm = (esr >> 1) & 0xf;
2243 params.is_write = ((esr & 1) == 0);
2244
2245 params.Op0 = 0;
2246 params.Op1 = (esr >> 16) & 0xf;
2247 params.Op2 = 0;
2248 params.CRn = 0;
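/*
 * Decode sketch for the fields above: in the MCRR/MRRC ISS, bit 0 is
 * the direction (0 == guest write), CRm sits in bits [4:1], Rt2 in
 * [14:10] and Opc1 in [19:16]; Rt itself has already been extracted by
 * kvm_vcpu_sys_get_rt().
 */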
2249
2250 /*
2251 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2252 * backends between AArch32 and AArch64, we get away with it.
2253 */
2254 if (params.is_write) {
2255 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2256 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2257 }
2258
2259 /*
2260 * If the table contains a handler, handle the
2261 * potential register operation in the case of a read and return
2262 * with success.
2263 */
2264 if (emulate_cp(vcpu, &params, global, nr_global)) {
2265 /* Split up the value between registers for the read side */
2266 if (!params.is_write) {
2267 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2268 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2269 }
2270
2271 return 1;
2272 }
2273
2274 unhandled_cp_access(vcpu, &params);
2275 return 1;
2276 }
2277
2278 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2279
2280 /*
2281 * The CP10 ID registers are architecturally mapped to AArch64 feature
2282 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
2283 * from AArch32.
2284 */
2285 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
2286 {
2287 u8 reg_id = (esr >> 10) & 0xf;
2288 bool valid;
2289
2290 params->is_write = ((esr & 1) == 0);
2291 params->Op0 = 3;
2292 params->Op1 = 0;
2293 params->CRn = 0;
2294 params->CRm = 3;
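/*
 * With Op0 = 3, Op1 = 0, CRn = 0, CRm = 3 we are in the MVFRx_EL1
 * encoding space; the switch below only needs to derive Op2 (0..2)
 * from the trapped register number.
 */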
2295
2296 /* CP10 ID registers are read-only */
2297 valid = !params->is_write;
2298
2299 switch (reg_id) {
2300 /* MVFR0 */
2301 case 0b0111:
2302 params->Op2 = 0;
2303 break;
2304 /* MVFR1 */
2305 case 0b0110:
2306 params->Op2 = 1;
2307 break;
2308 /* MVFR2 */
2309 case 0b0101:
2310 params->Op2 = 2;
2311 break;
2312 default:
2313 valid = false;
2314 }
2315
2316 if (valid)
2317 return true;
2318
2319 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
2320 params->is_write ? "write" : "read", reg_id);
2321 return false;
2322 }
2323
2324 /**
2325 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
2326 * VFP Register' from AArch32.
2327 * @vcpu: The vCPU pointer
2328 *
2329 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
2330 * Work out the correct AArch64 system register encoding and reroute to the
2331 * AArch64 system register emulation.
2332 */
2333 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
2334 {
2335 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2336 u64 esr = kvm_vcpu_get_esr(vcpu);
2337 struct sys_reg_params params;
2338
2339 /* UNDEF on any unhandled register access */
2340 if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
2341 kvm_inject_undefined(vcpu);
2342 return 1;
2343 }
2344
2345 if (emulate_sys_reg(vcpu, &params))
2346 vcpu_set_reg(vcpu, Rt, params.regval);
2347
2348 return 1;
2349 }
2350
2351 /**
2352 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
2353 * CRn=0, which corresponds to the AArch32 feature
2354 * registers.
2355 * @vcpu: the vCPU pointer
2356 * @params: the system register access parameters.
2357 *
2358 * Our cp15 system register tables do not enumerate the AArch32 feature
2359 * registers. Conveniently, our AArch64 table does, and the AArch32 system
2360 * register encoding can be trivially remapped into the AArch64 encoding for
2361 * the feature registers: append op0=3, leaving op1, CRn, CRm, and op2 the same.
2362 *
2363 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
2364 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
2365 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
2366 * treat undefined registers in this range as RAZ.
2367 */
2368 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
2369 struct sys_reg_params *params)
2370 {
2371 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2372
2373 /* Treat impossible writes to RO registers as UNDEFINED */
2374 if (params->is_write) {
2375 unhandled_cp_access(vcpu, params);
2376 return 1;
2377 }
2378
2379 params->Op0 = 3;
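/*
 * Example (illustrative): an AArch32 read of ID_ISAR0 traps with
 * Op1 = 0, CRn = 0, CRm = 2, Op2 = 0; forcing Op0 = 3 turns this into
 * the ID_ISAR0_EL1 encoding, which sys_reg_descs already emulates.
 */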
2380
2381 /*
2382 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
2383 * Avoid conflicting with future expansion of AArch64 feature registers
2384 * and simply treat them as RAZ here.
2385 */
2386 if (params->CRm > 3)
2387 params->regval = 0;
2388 else if (!emulate_sys_reg(vcpu, params))
2389 return 1;
2390
2391 vcpu_set_reg(vcpu, Rt, params->regval);
2392 return 1;
2393 }
2394
2395 /**
2396 * kvm_handle_cp_32 -- handles an MCR/MRC trap on a guest CP14/CP15 access
2397 * @vcpu: The VCPU pointer
2398 * @params: The decoded system register access parameters
 * @global: The 32-bit coprocessor register table to search
 * @nr_global: The number of entries in @global
2399 */
2400 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2401 struct sys_reg_params *params,
2402 const struct sys_reg_desc *global,
2403 size_t nr_global)
2404 {
2405 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2406
2407 params->regval = vcpu_get_reg(vcpu, Rt);
2408
2409 if (emulate_cp(vcpu, params, global, nr_global)) {
2410 if (!params->is_write)
2411 vcpu_set_reg(vcpu, Rt, params->regval);
2412 return 1;
2413 }
2414
2415 unhandled_cp_access(vcpu, params);
2416 return 1;
2417 }
2418
2419 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
2420 {
2421 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
2422 }
2423
2424 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
2425 {
2426 struct sys_reg_params params;
2427
2428 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2429
2430 /*
2431 * Certain AArch32 ID registers are handled by rerouting to the AArch64
2432 * system register table. Registers in the ID range where CRm=0 are
2433 * excluded from this scheme as they do not trivially map into AArch64
2434 * system register encodings.
2435 */
2436 if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
2437 return kvm_emulate_cp15_id_reg(vcpu, &params);
2438
2439 return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
2440 }
2441
2442 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
2443 {
2444 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
2445 }
2446
2447 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
2448 {
2449 struct sys_reg_params params;
2450
2451 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2452
2453 return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
2454 }
2455
2456 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2457 {
2458 // See ARM DDI 0487E.a, section D12.3.2
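// Masking with 0b1011 matches CRn == 11 or CRn == 15, i.e. the
// implementation-defined encoding space when Op0 == 3.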
2459 return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2460 }
2461
2462 /**
2463 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
2464 * @vcpu: The VCPU pointer
2465 * @params: Decoded system register parameters
2466 *
2467 * Return: true if the system register access was successful, false otherwise.
2468 */
2469 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
2470 struct sys_reg_params *params)
2471 {
2472 const struct sys_reg_desc *r;
2473
2474 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2475
2476 if (likely(r)) {
2477 perform_access(vcpu, params, r);
2478 return true;
2479 }
2480
2481 if (is_imp_def_sys_reg(params)) {
2482 kvm_inject_undefined(vcpu);
2483 } else {
2484 print_sys_reg_msg(params,
2485 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2486 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2487 kvm_inject_undefined(vcpu);
2488 }
2489 return false;
2490 }
2491
2492 /**
2493 * kvm_reset_sys_regs - sets system registers to reset value
2494 * @vcpu: The VCPU pointer
2495 *
2496 * This function finds the right table above and sets the registers on the
2497 * virtual CPU struct to their architecturally defined reset values.
2498 */
2499 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2500 {
2501 unsigned long i;
2502
2503 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2504 if (sys_reg_descs[i].reset)
2505 sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
2506 }
2507
2508 /**
2509 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2510 * @vcpu: The VCPU pointer
2511 */
2512 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
2513 {
2514 struct sys_reg_params params;
2515 unsigned long esr = kvm_vcpu_get_esr(vcpu);
2516 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2517
2518 trace_kvm_handle_sys_reg(esr);
2519
2520 params = esr_sys64_to_params(esr);
2521 params.regval = vcpu_get_reg(vcpu, Rt);
2522
2523 if (!emulate_sys_reg(vcpu, &params))
2524 return 1;
2525
2526 if (!params.is_write)
2527 vcpu_set_reg(vcpu, Rt, params.regval);
2528 return 1;
2529 }
2530
2531 /******************************************************************************
2532 * Userspace API
2533 *****************************************************************************/
2534
2535 static bool index_to_params(u64 id, struct sys_reg_params *params)
2536 {
2537 switch (id & KVM_REG_SIZE_MASK) {
2538 case KVM_REG_SIZE_U64:
2539 /* Any unused index bits mean the index is not valid. */
2540 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2541 | KVM_REG_ARM_COPROC_MASK
2542 | KVM_REG_ARM64_SYSREG_OP0_MASK
2543 | KVM_REG_ARM64_SYSREG_OP1_MASK
2544 | KVM_REG_ARM64_SYSREG_CRN_MASK
2545 | KVM_REG_ARM64_SYSREG_CRM_MASK
2546 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2547 return false;
2548 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2549 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2550 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2551 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2552 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2553 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2554 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2555 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2556 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2557 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2558 return true;
2559 default:
2560 return false;
2561 }
2562 }
2563
2564 const struct sys_reg_desc *get_reg_by_id(u64 id,
2565 const struct sys_reg_desc table[],
2566 unsigned int num)
2567 {
2568 struct sys_reg_params params;
2569
2570 if (!index_to_params(id, &params))
2571 return NULL;
2572
2573 return find_reg(&params, table, num);
2574 }
2575
2576 /* Decode an index value, and find the sys_reg_desc entry. */
2577 static const struct sys_reg_desc *
2578 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
2579 const struct sys_reg_desc table[], unsigned int num)
2580
2581 {
2582 const struct sys_reg_desc *r;
2583
2584 /* We only do sys_reg for now. */
2585 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2586 return NULL;
2587
2588 r = get_reg_by_id(id, table, num);
2589
2590 /* Not saved in the sys_reg array and not otherwise accessible? */
2591 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
2592 r = NULL;
2593
2594 return r;
2595 }
2596
2597 /*
2598 * These are the invariant sys_reg registers: we let the guest see the
2599 * host versions of these, so they're part of the guest state.
2600 *
2601 * A future CPU may provide a mechanism to present different values to
2602 * the guest, or a future kvm may trap them.
2603 */
2604
2605 #define FUNCTION_INVARIANT(reg) \
2606 static void get_##reg(struct kvm_vcpu *v, \
2607 const struct sys_reg_desc *r) \
2608 { \
2609 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
2610 }
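/*
 * Expansion sketch (illustrative): FUNCTION_INVARIANT(midr_el1) yields
 *
 *	static void get_midr_el1(struct kvm_vcpu *v,
 *				 const struct sys_reg_desc *r)
 *	{
 *		((struct sys_reg_desc *)r)->val = read_sysreg(midr_el1);
 *	}
 *
 * i.e. a reset helper that simply snapshots the host's register value.
 */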
2611
2612 FUNCTION_INVARIANT(midr_el1)
2613 FUNCTION_INVARIANT(revidr_el1)
2614 FUNCTION_INVARIANT(clidr_el1)
2615 FUNCTION_INVARIANT(aidr_el1)
2616
2617 static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
2618 {
2619 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
2620 }
2621
2622 /* ->val is filled in by kvm_sys_reg_table_init() */
2623 static struct sys_reg_desc invariant_sys_regs[] = {
2624 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2625 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2626 { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2627 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2628 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2629 };
2630
2631 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
2632 {
2633 const struct sys_reg_desc *r;
2634
2635 r = get_reg_by_id(id, invariant_sys_regs,
2636 ARRAY_SIZE(invariant_sys_regs));
2637 if (!r)
2638 return -ENOENT;
2639
2640 return put_user(r->val, uaddr);
2641 }
2642
2643 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
2644 {
2645 const struct sys_reg_desc *r;
2646 u64 val;
2647
2648 r = get_reg_by_id(id, invariant_sys_regs,
2649 ARRAY_SIZE(invariant_sys_regs));
2650 if (!r)
2651 return -ENOENT;
2652
2653 if (get_user(val, uaddr))
2654 return -EFAULT;
2655
2656 /* This is what we mean by invariant: you can't change it. */
2657 if (r->val != val)
2658 return -EINVAL;
2659
2660 return 0;
2661 }
2662
2663 static bool is_valid_cache(u32 val)
2664 {
2665 u32 level, ctype;
2666
2667 if (val >= CSSELR_MAX)
2668 return false;
2669
2670 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
2671 level = (val >> 1);
2672 ctype = (cache_levels >> (level * 3)) & 7;
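/*
 * Example: val == 3 selects the level 2 instruction cache (level == 1,
 * bottom bit set), so it is only valid if Ctype2 reports an
 * instruction-only cache or separate I/D caches.
 */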
2673
2674 switch (ctype) {
2675 case 0: /* No cache */
2676 return false;
2677 case 1: /* Instruction cache only */
2678 return (val & 1);
2679 case 2: /* Data cache only */
2680 case 4: /* Unified cache */
2681 return !(val & 1);
2682 case 3: /* Separate instruction and data caches */
2683 return true;
2684 default: /* Reserved: we can't know instruction or data. */
2685 return false;
2686 }
2687 }
2688
2689 static int demux_c15_get(u64 id, void __user *uaddr)
2690 {
2691 u32 val;
2692 u32 __user *uval = uaddr;
2693
2694 /* Fail if we have unknown bits set. */
2695 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2696 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2697 return -ENOENT;
2698
2699 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2700 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2701 if (KVM_REG_SIZE(id) != 4)
2702 return -ENOENT;
2703 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2704 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2705 if (!is_valid_cache(val))
2706 return -ENOENT;
2707
2708 return put_user(get_ccsidr(val), uval);
2709 default:
2710 return -ENOENT;
2711 }
2712 }
2713
2714 static int demux_c15_set(u64 id, void __user *uaddr)
2715 {
2716 u32 val, newval;
2717 u32 __user *uval = uaddr;
2718
2719 /* Fail if we have unknown bits set. */
2720 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2721 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2722 return -ENOENT;
2723
2724 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2725 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2726 if (KVM_REG_SIZE(id) != 4)
2727 return -ENOENT;
2728 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2729 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2730 if (!is_valid_cache(val))
2731 return -ENOENT;
2732
2733 if (get_user(newval, uval))
2734 return -EFAULT;
2735
2736 /* This is also invariant: you can't change it. */
2737 if (newval != get_ccsidr(val))
2738 return -EINVAL;
2739 return 0;
2740 default:
2741 return -ENOENT;
2742 }
2743 }
2744
2745 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
2746 const struct sys_reg_desc table[], unsigned int num)
2747 {
2748 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
2749 const struct sys_reg_desc *r;
2750 u64 val;
2751 int ret;
2752
2753 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
2754 if (!r)
2755 return -ENOENT;
2756
2757 if (r->get_user) {
2758 ret = (r->get_user)(vcpu, r, &val);
2759 } else {
2760 val = __vcpu_sys_reg(vcpu, r->reg);
2761 ret = 0;
2762 }
2763
2764 if (!ret)
2765 ret = put_user(val, uaddr);
2766
2767 return ret;
2768 }
2769
2770 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2771 {
2772 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2773 int err;
2774
2775 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2776 return demux_c15_get(reg->id, uaddr);
2777
2778 err = get_invariant_sys_reg(reg->id, uaddr);
2779 if (err != -ENOENT)
2780 return err;
2781
2782 return kvm_sys_reg_get_user(vcpu, reg,
2783 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2784 }
2785
2786 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
2787 const struct sys_reg_desc table[], unsigned int num)
2788 {
2789 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
2790 const struct sys_reg_desc *r;
2791 u64 val;
2792 int ret;
2793
2794 if (get_user(val, uaddr))
2795 return -EFAULT;
2796
2797 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
2798 if (!r)
2799 return -ENOENT;
2800
2801 if (sysreg_user_write_ignore(vcpu, r))
2802 return 0;
2803
2804 if (r->set_user) {
2805 ret = (r->set_user)(vcpu, r, val);
2806 } else {
2807 __vcpu_sys_reg(vcpu, r->reg) = val;
2808 ret = 0;
2809 }
2810
2811 return ret;
2812 }
2813
2814 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2815 {
2816 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2817 int err;
2818
2819 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2820 return demux_c15_set(reg->id, uaddr);
2821
2822 err = set_invariant_sys_reg(reg->id, uaddr);
2823 if (err != -ENOENT)
2824 return err;
2825
2826 return kvm_sys_reg_set_user(vcpu, reg,
2827 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2828 }
2829
2830 static unsigned int num_demux_regs(void)
2831 {
2832 unsigned int i, count = 0;
2833
2834 for (i = 0; i < CSSELR_MAX; i++)
2835 if (is_valid_cache(i))
2836 count++;
2837
2838 return count;
2839 }
2840
2841 static int write_demux_regids(u64 __user *uindices)
2842 {
2843 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2844 unsigned int i;
2845
2846 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2847 for (i = 0; i < CSSELR_MAX; i++) {
2848 if (!is_valid_cache(i))
2849 continue;
2850 if (put_user(val | i, uindices))
2851 return -EFAULT;
2852 uindices++;
2853 }
2854 return 0;
2855 }
2856
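/*
 * Sketch (illustrative): the index built by sys_reg_to_index() below
 * round-trips through index_to_params() above; e.g. a descriptor with
 * Op0 = 3, Op1 = 0, CRn = 13, CRm = 0, Op2 = 4 (TPIDR_EL1) packs those
 * five fields beneath the KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 * KVM_REG_ARM64_SYSREG prefix.
 */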
2857 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2858 {
2859 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2860 KVM_REG_ARM64_SYSREG |
2861 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2862 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2863 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2864 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2865 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2866 }
2867
2868 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2869 {
2870 if (!*uind)
2871 return true;
2872
2873 if (put_user(sys_reg_to_index(reg), *uind))
2874 return false;
2875
2876 (*uind)++;
2877 return true;
2878 }
2879
2880 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2881 const struct sys_reg_desc *rd,
2882 u64 __user **uind,
2883 unsigned int *total)
2884 {
2885 /*
2886 * Ignore registers we trap but don't save,
2887 * and for which no custom user accessor is provided.
2888 */
2889 if (!(rd->reg || rd->get_user))
2890 return 0;
2891
2892 if (sysreg_hidden(vcpu, rd))
2893 return 0;
2894
2895 if (!copy_reg_to_user(rd, uind))
2896 return -EFAULT;
2897
2898 (*total)++;
2899 return 0;
2900 }
2901
2902 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
2903 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2904 {
2905 const struct sys_reg_desc *i2, *end2;
2906 unsigned int total = 0;
2907 int err;
2908
2909 i2 = sys_reg_descs;
2910 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2911
2912 while (i2 != end2) {
2913 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
2914 if (err)
2915 return err;
2916 }
2917 return total;
2918 }
2919
2920 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2921 {
2922 return ARRAY_SIZE(invariant_sys_regs)
2923 + num_demux_regs()
2924 + walk_sys_regs(vcpu, (u64 __user *)NULL);
2925 }
2926
2927 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2928 {
2929 unsigned int i;
2930 int err;
2931
2932 /* Then give them all the invariant registers' indices. */
2933 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2934 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2935 return -EFAULT;
2936 uindices++;
2937 }
2938
2939 err = walk_sys_regs(vcpu, uindices);
2940 if (err < 0)
2941 return err;
2942 uindices += err;
2943
2944 return write_demux_regids(uindices);
2945 }
2946
2947 int kvm_sys_reg_table_init(void)
2948 {
2949 bool valid = true;
2950 unsigned int i;
2951 struct sys_reg_desc clidr;
2952
2953 /* Make sure tables are unique and in order. */
2954 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
2955 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
2956 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
2957 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
2958 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
2959 valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
2960
2961 if (!valid)
2962 return -EINVAL;
2963
2964 /* We abuse the reset function to overwrite the table itself. */
2965 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2966 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2967
2968 /*
2969 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
2970 *
2971 * If software reads the Cache Type fields from Ctype1
2972 * upwards, once it has seen a value of 0b000, no caches
2973 * exist at further-out levels of the hierarchy. So, for
2974 * example, if Ctype3 is the first Cache Type field with a
2975 * value of 0b000, the values of Ctype4 to Ctype7 must be
2976 * ignored.
2977 */
2978 get_clidr_el1(NULL, &clidr); /* Ugly... */
2979 cache_levels = clidr.val;
2980 for (i = 0; i < 7; i++)
2981 if (((cache_levels >> (i*3)) & 7) == 0)
2982 break;
2983 /* Clear all higher bits. */
2984 cache_levels &= (1 << (i*3))-1;
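/*
 * Worked example (illustrative): a CLIDR with Ctype1 = 0b011 (separate
 * I/D), Ctype2 = 0b100 (unified) and Ctype3 = 0b000 makes the loop stop
 * at i == 2, so cache_levels keeps only its low 6 bits and only CSSELR
 * values targeting levels 1 and 2 pass is_valid_cache().
 */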
2985
2986 return 0;
2987 }
2988