// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kstrtox.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;

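/*
 * Set ICH_HCR_EL2.UIE so that a maintenance interrupt is raised once most of
 * the List Registers have been consumed, giving us a chance to refill them.
 */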
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

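/*
 * True when an LR has been emptied by the guest (no state left), asked for
 * an EOI maintenance interrupt and is not a hardware-mapped interrupt, i.e.
 * the guest just EOI'ed a purely virtual interrupt.
 */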
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

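/*
 * Transfer the state of the in-use List Registers back into the software
 * model after running the guest: the active/pending bits are folded back
 * into the corresponding struct vgic_irq, and guest EOIs of level interrupts
 * are forwarded to any registered ack notifiers.
 */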
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

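/*
 * Pack the software view of the CPU interface state (struct vgic_vmcr) into
 * the ICH_VMCR_EL2 layout; vgic_v3_get_vmcr() below performs the reverse.
 */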
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

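/*
 * Reset value for GICR_PENDBASER: inner cacheable (read-allocate,
 * write-back), outer cacheability same as inner, inner shareable.
 */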
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

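/*
 * Reset the GICv3 CPU interface state for a vcpu: clear VMCR, set up SRE and
 * the initial GICR_PENDBASER value, cache the ID/priority bit widths from
 * ICH_VTR_EL2, and program the trap bits in ICH_HCR_EL2.
 */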
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}

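/*
 * Synchronise the pending state of an LPI with the pending table in guest
 * RAM: read the byte covering this INTID from the page pointed to by
 * GICR_PENDBASER, latch the bit into the emulated state, and clear it in
 * guest memory once consumed.
 */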
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}

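/* Re-request the doorbell interrupts for all vPEs, undoing unmap_all_vpes(). */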
static void map_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
						dist->its_vm.vpes[i]->irq));
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(kvm);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(kvm);

	return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
				       rdreg->base, SZ_64K, sz))
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

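/* Look up the redistributor region with the given index, if any. */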
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

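/*
 * Final checks before the first vcpu run: the distributor and all
 * redistributor frames must have been assigned addresses by userspace,
 * they must not overlap, and the vgic must already be initialized.
 * On GICv4.1 this is also the point where the vSGIs get configured.
 */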
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %ld redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm))
		return -EBUSY;

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

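/*
 * Command-line overrides for the trapping and GICv4 behaviour, e.g. booting
 * with kvm-arm.vgic_v3_group0_trap=1 forces trapping of group-0 GIC system
 * register accesses even on CPUs that do not otherwise require it.
 */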
static int __init early_group0_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return kstrtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return kstrtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return kstrtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

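/*
 * CPUs that advertise SEI support (ICH_VTR_EL2.SEIS) but whose locally
 * generated SEIs are unusable, requiring the sysreg trapping workaround
 * applied in vgic_v3_probe().
 */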
static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

static bool vgic_v3_broken_seis(void)
{
	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
		is_midr_in_range_list(read_cpuid_id(), broken_seis));
}

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (vgic_v3_broken_seis()) {
		kvm_info("GICv3 with broken locally generated SEI\n");

		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap || dir_trap) {
705 kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
706 group0_trap ? "G0" : "",
707 group1_trap ? "G1" : "",
708 common_trap ? "C" : "",
709 dir_trap ? "D" : "");
710 static_branch_enable(&vgic_v3_cpuif_trap);
711 }
712
713 kvm_vgic_global_state.vctrl_base = NULL;
714 kvm_vgic_global_state.type = VGIC_V3;
715 kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
716
717 return 0;
718 }
719
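/*
 * Restore the GICv3 CPU interface state on vcpu load: write VMCR (when the
 * guest uses system register access), restore the active priority registers,
 * activate traps on VHE, and load the GICv4 vPE context.
 */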
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

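/* Mirror image of vgic_v3_load(): save the state and deactivate the traps. */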
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}