// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
 */
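
/*
 * Illustrative sketch (not part of this file): roughly how a
 * VFIO-side consumer could drive the forwarding API described above,
 * assuming it already holds a valid KVM routing entry for the vITS
 * doorbell. example_attach_producer(), host_irq and entry are
 * hypothetical names; the real plumbing lives in the irqbypass/VFIO
 * layers.
 */
#if 0	/* example only, not compiled */
static int example_attach_producer(struct kvm *kvm, int host_irq,
				   struct kvm_kernel_irq_routing_entry *entry)
{
	int ret;

	/* Ask KVM to upgrade the guest's LPI to a VLPI on host_irq */
	ret = kvm_vgic_v4_set_forwarding(kvm, host_irq, entry);
	if (ret)
		return ret;

	/*
	 * ... the device now delivers MSIs directly to the guest.
	 * On endpoint deconfiguration, revert to SW injection:
	 */
	return kvm_vgic_v4_unset_forwarding(kvm, host_irq, entry);
}
#endif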

#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Ensure we only update pending_last
	 * *after* the non-residency sequence has completed.
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled = irq->enabled;
	vpe->sgi_config[irq->intid].group = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to a host
	 * IRQ. The SGI code will do its magic.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with the kvm lock held */
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}
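
/*
 * Illustrative sketch (not part of this file): the distributor
 * emulation flips dist->nassgireq when the guest toggles
 * GICD_CTLR.nASSGIreq, then funnels through the function above to
 * re-sync every vSGI. example_set_nassgireq() is a hypothetical
 * stand-in for the real handler in the GICv3 distributor MMIO code.
 */
#if 0	/* example only, not compiled */
static void example_set_nassgireq(struct kvm *kvm, bool enable)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	dist->nassgireq = enable;
	/* Switch all vSGIs between SW emulation and direct injection */
	vgic_v4_configure_vsgis(kvm);
	mutex_unlock(&kvm->lock);
}
#endif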

/*
 * Must be called with GICv4.1 and the vPE unmapped, which
 * indicates the invalidation of any VPT caches associated
 * with the vPE, thus we can get the VLPI state by peeking
 * at the VPT.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}
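
/*
 * Illustrative sketch (not part of this file): how a save path could
 * sample the pending state of a VLPI once the precondition above
 * holds (GICv4.1, vPE unmapped, VPT caches invalidated).
 * example_save_vlpi_pending() is a hypothetical helper; in KVM the
 * actual users live in the vITS save/restore code.
 */
#if 0	/* example only, not compiled */
static void example_save_vlpi_pending(struct vgic_irq *irq)
{
	bool pending;

	/* Peek at the VPT and latch the state for serialization */
	vgic_v4_get_vlpi_state(irq, &pending);
	irq->pending_latch = pending;
}
#endif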

int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
	return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm: Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. This relies on kvm->lock to be
 * held. In both cases, the number of vcpus should now be
 * fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_vcpus, ret;
	unsigned long i;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL_ACCOUNT);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too
		 * early...
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = vgic_v4_request_vpe_irq(vcpu, irq);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown...
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 *
 * Relies on kvm->lock to be held.
 */
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the VPE is resident, let's get rid of a potential
	 * doorbell interrupt that would still be pending. This is a
	 * GICv4.0 only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}
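
/*
 * Illustrative sketch (not part of this file): the residency dance a
 * run loop performs around guest entry/exit, per the comment at the
 * top of this file. The load/put pair must run in a non-preemptible
 * context. example_run_once() is hypothetical; the real callers are
 * KVM's vcpu load/put paths, and need_db is only true when the vcpu
 * is actually blocking.
 */
#if 0	/* example only, not compiled */
static int example_run_once(struct kvm_vcpu *vcpu)
{
	int ret;

	preempt_disable();

	/* Point the redistributor at this CPU, then make the vPE resident */
	ret = vgic_v4_load(vcpu);
	if (ret)
		goto out;

	/* ... enter the guest, run, exit ... */

	/* Leaving to block: enable the doorbell so pending VLPIs wake us */
	ret = vgic_v4_put(vcpu, true);
out:
	preempt_enable();
	return ret;
}
#endif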

static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo = irq_entry->msi.address_lo,
		.address_hi = irq_entry->msi.address_hi,
		.data = irq_entry->msi.data,
		.flags = irq_entry->msi.flags,
		.devid = irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm = &kvm->arch.vgic.its_vm,
		.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid = irq->intid,
		.properties = ((irq->priority & 0xfc) |
			       (irq->enabled ? LPI_PROP_ENABLED : 0) |
			       LPI_PROP_GROUP1),
		.db_enabled = true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}