// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#include <hyp/adjust_pc.h>

#include <nvhe/fixed_config.h>

#include "../../sys_regs.h"

/*
 * Copies of the host's CPU feature registers holding sanitized values at hyp.
 */
u64 id_aa64pfr0_el1_sys_val;
u64 id_aa64pfr1_el1_sys_val;
u64 id_aa64isar0_el1_sys_val;
u64 id_aa64isar1_el1_sys_val;
u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;

/*
 * Inject an unknown/undefined exception into an AArch64 guest while most of
 * its sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

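	/*
	 * Mark a synchronous exception pending for AArch64 EL1;
	 * __kvm_adjust_pc() below resolves it into the vcpu's PC and PSTATE.
	 */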
	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
			     KVM_ARM64_PENDING_EXCEPTION);

	__kvm_adjust_pc(vcpu);

	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}

/*
 * Returns the restricted feature values of the feature register, based on the
 * limitations in restrict_fields.
 * A feature id field value of 0b0000 does not impose any restrictions.
 * Note: Use only for unsigned feature field values.
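 *
 * Illustrative example (values invented for this comment): with 4-bit
 * fields, sys_reg_val = 0x21 (field0 = 1, field1 = 2) restricted by
 * restrict_fields = 0x12 (field0 = 2, field1 = 1) returns 0x11, i.e.,
 * each field is clamped to the unsigned minimum of the two values.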
 */
static u64 get_restricted_features_unsigned(u64 sys_reg_val,
					    u64 restrict_fields)
{
	u64 value = 0UL;
	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	/*
	 * According to the Arm Architecture Reference Manual, feature fields
	 * use increasing values to indicate increases in functionality.
	 * Iterate over the restricted feature fields and calculate the minimum
	 * unsigned value between the one supported by the system and what the
	 * value is being restricted to.
	 */
	while (sys_reg_val && restrict_fields) {
		value |= min(sys_reg_val & mask, restrict_fields & mask);
		sys_reg_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= ARM64_FEATURE_FIELD_BITS;
	}

	return value;
}

/*
 * Functions that return the value of feature id registers for protected VMs
 * based on allowed features, system features, and KVM support.
 */

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 set_mask = 0;
	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

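	/*
	 * Bits kept by allow_mask pass through from the sanitized host value
	 * unchanged; set_mask then supplies the values of the restricted
	 * fields computed below.
	 */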
	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

	/* Spectre and Meltdown mitigation in KVM */
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
			       (u64)kvm->arch.pfr0_csv2);
	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
			       (u64)kvm->arch.pfr0_csv3);

	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
{
	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;

	if (!kvm_has_mte(kvm))
		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);

	return id_aa64pfr1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors; therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints and watchpoints;
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug; therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features; therefore, hyp has
	 * no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}

static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}

static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));

	return id_aa64isar1_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
{
	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;

	if (!vcpu_has_ptrauth(vcpu))
		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
				ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));

	return id_aa64isar2_el1_sys_val & allow_mask;
}

static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
{
	u64 set_mask;

	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);

	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
}

static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}

static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}

/* Read a sanitized cpufeature ID register by its encoding */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}

/* Handler to RAZ/WI sysregs */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	return true;
}

/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests; therefore, pKVM has no sanitized copy
	 * of AArch32 feature id registers.
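	 *
	 * As a consequence, the BUILD_BUG_ON below checks that the EL1 field
	 * of the restricted PFR0 value cannot advertise AArch32 support.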
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);

	return pvm_access_raz_wi(vcpu, p, r);
}

/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}

static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
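	/*
	 * Reads report the system register interface as always enabled
	 * (SRE set, IRQ/FIQ bypass disabled); writes are ignored.
	 */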
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}

/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_id_aarch64,		\
}
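/*
 * For instance, ID_UNALLOCATED(4, 2) covers the hole at Op0=3, Op1=0,
 * CRn=0, CRm=4, Op2=2; reads of it return zero, since pvm_read_id_reg()
 * treats unhandled encodings as RAZ.
 */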

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	/* Scalable Vector Registers are restricted. */

	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};

/*
 * Checks that the sysreg table is unique and in-order.
 *
 * Returns 0 if the table is consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

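	/*
	 * Entries must compare strictly increasing; cmp_sys_reg() >= 0 for
	 * two neighbours means the table is out of order or has a duplicate.
	 */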
	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	return 0;
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}

/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate
 * that the hypervisor has handled the exit, and control should go back to the
 * guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}