Lines Matching refs:irq
63 struct vgic_irq *irq = NULL; in vgic_get_lpi() local
68 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_get_lpi()
69 if (irq->intid != intid) in vgic_get_lpi()
76 vgic_get_irq_kref(irq); in vgic_get_lpi()
79 irq = NULL; in vgic_get_lpi()
84 return irq; in vgic_get_lpi()
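
For context, a sketch of how the matched lines in vgic_get_lpi() fit together. The lpi_list_lock handling, the out_unlock label and the flags variable are filled in from the mainline KVM vgic implementation and are not part of the matched lines, so the exact revision behind this listing may differ slightly:

static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This takes a reference; the caller is expected to call
                 * vgic_put_irq() once it is done with the interrupt.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}
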
126 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq) in __vgic_put_lpi_locked() argument
130 if (!kref_put(&irq->refcount, vgic_irq_release)) in __vgic_put_lpi_locked()
133 list_del(&irq->lpi_list); in __vgic_put_lpi_locked()
136 kfree(irq); in __vgic_put_lpi_locked()
139 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
144 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
148 __vgic_put_lpi_locked(kvm, irq); in vgic_put_irq()
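
The put side pairs with the lookup above: interrupts below VGIC_MIN_LPI live in statically allocated arrays and are never freed, while LPIs are reference counted and released on the last put. A sketch; the lpi_list_lock handling and the lpi_list_count bookkeeping are filled in from mainline and are not part of the matched lines:

void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (!kref_put(&irq->refcount, vgic_irq_release))
                return;

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;

        kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;

        /* SGIs, PPIs and SPIs are statically allocated; nothing to drop. */
        if (irq->intid < VGIC_MIN_LPI)
                return;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        __vgic_put_lpi_locked(kvm, irq);
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
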
155 struct vgic_irq *irq, *tmp; in vgic_flush_pending_lpis() local
160 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_pending_lpis()
161 if (irq->intid >= VGIC_MIN_LPI) { in vgic_flush_pending_lpis()
162 raw_spin_lock(&irq->irq_lock); in vgic_flush_pending_lpis()
163 list_del(&irq->ap_list); in vgic_flush_pending_lpis()
164 irq->vcpu = NULL; in vgic_flush_pending_lpis()
165 raw_spin_unlock(&irq->irq_lock); in vgic_flush_pending_lpis()
166 vgic_put_irq(vcpu->kvm, irq); in vgic_flush_pending_lpis()
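
A sketch of vgic_flush_pending_lpis() around the matched lines; only the surrounding ap_list_lock acquisition is filled in from mainline:

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                if (irq->intid >= VGIC_MIN_LPI) {
                        /* Unqueue the LPI and drop the ap_list reference. */
                        raw_spin_lock(&irq->irq_lock);
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);
                        vgic_put_irq(vcpu->kvm, irq);
                }
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}
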
173 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
175 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
180 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
184 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
186 if (irq->ops && irq->ops->get_input_level) in vgic_get_phys_line_level()
187 return irq->ops->get_input_level(irq->intid); in vgic_get_phys_line_level()
189 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
196 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
199 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
200 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
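
The three physical-state helpers bridge a hardware-mapped virtual interrupt to the host irqchip via irq_set_irqchip_state()/irq_get_irqchip_state(). A sketch; the IRQCHIP_STATE_* arguments and the line_level local are filled in from mainline:

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
        bool line_level;

        BUG_ON(!irq->hw);

        /* Prefer a per-IRQ callback when one was registered with the mapping. */
        if (irq->ops && irq->ops->get_input_level)
                return irq->ops->get_input_level(irq->intid);

        WARN_ON(irq_get_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      &line_level));
        return line_level;
}

/* Set/clear the physical active state, which can only be done on a hw IRQ. */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
        BUG_ON(!irq->hw);
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_ACTIVE,
                                      active));
}
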
216 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
218 lockdep_assert_held(&irq->irq_lock); in vgic_target_oracle()
221 if (irq->active) in vgic_target_oracle()
222 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
230 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
231 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
232 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
235 return irq->target_vcpu; in vgic_target_oracle()
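
vgic_target_oracle() decides which vcpu, if any, an interrupt should be queued to. A sketch assembling the matched lines; the inline comments and the final return NULL are filled in from mainline:

static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        lockdep_assert_held(&irq->irq_lock);

        /* An active interrupt must stay on its current (or target) vcpu. */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * An enabled and pending interrupt goes to its configured target,
         * unless the distributor is disabled, in which case nothing is
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /* Neither active nor (enabled and pending): queue nowhere. */
        return NULL;
}
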
313 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
315 if (irq->owner != owner) in vgic_validate_injection()
318 switch (irq->config) { in vgic_validate_injection()
320 return irq->line_level != level; in vgic_validate_injection()
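
vgic_validate_injection() filters out injections that would not change any state: only the registered owner may drive the line, a level interrupt must actually change level, and an edge interrupt is only latched on a rising edge. A sketch; the VGIC_CONFIG_EDGE case and the fall-through return are filled in from mainline:

static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}
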
336 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
341 lockdep_assert_held(&irq->irq_lock); in vgic_queue_irq_unlock()
344 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
345 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
355 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
377 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
382 raw_spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
396 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
397 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
401 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
409 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
410 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
411 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
413 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
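
The matched lines in vgic_queue_irq_unlock() implement a drop-and-retry pattern: the ap_list_lock must be taken before the per-IRQ lock, so the IRQ lock is released, both locks are re-taken in the right order, and the target is re-validated before the interrupt is queued. A condensed sketch with the retry label, the ap_list_lock handling and the final vcpu kick filled in from mainline, and the longer mainline comments shortened:

bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags)
{
        struct kvm_vcpu *vcpu;

        lockdep_assert_held(&irq->irq_lock);

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /* Already queued, or nothing to queue: drop the lock and leave. */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * Lock ordering requires the ap_list_lock before the per-IRQ lock,
         * so drop the IRQ lock, take both in order, and re-check that
         * nothing changed in between.
         */
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        raw_spin_lock(&irq->irq_lock);

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
                                           flags);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /* Take a reference for the ap_list entry and queue the interrupt. */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}
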
443 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
457 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
458 if (!irq) in kvm_vgic_inject_irq()
461 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
463 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
465 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
466 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
470 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
471 irq->line_level = level; in kvm_vgic_inject_irq()
473 irq->pending_latch = true; in kvm_vgic_inject_irq()
475 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
476 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
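
kvm_vgic_inject_irq() is the entry point devices use to raise or lower a virtual interrupt line. A sketch around the matched lines; the prototype (cpuid parameter), the kvm_get_vcpu() lookup and the error checks are reconstructed from a mainline version of roughly this vintage (newer kernels pass a vcpu pointer instead), and the trace point and lazy-init check are omitted:

int kvm_vgic_inject_irq(struct kvm *kvm, unsigned int cpuid, unsigned int intid,
                        bool level, void *owner)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        unsigned long flags;

        /* For private interrupts (SGIs/PPIs) the targeted vcpu matters. */
        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        /* vgic_queue_irq_unlock() drops irq_lock for us. */
        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}
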
482 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
501 irq->hw = true; in kvm_vgic_map_irq()
502 irq->host_irq = host_irq; in kvm_vgic_map_irq()
503 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
504 irq->ops = ops; in kvm_vgic_map_irq()
509 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
511 irq->hw = false; in kvm_vgic_unmap_irq()
512 irq->hwintid = 0; in kvm_vgic_unmap_irq()
513 irq->ops = NULL; in kvm_vgic_unmap_irq()
519 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
523 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
525 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
526 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops); in kvm_vgic_map_phys_irq()
527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
528 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
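
Mapping a virtual interrupt to a physical one records the host IRQ, the underlying hardware INTID and the optional ops callbacks on the vgic_irq. A sketch of kvm_vgic_map_irq() and kvm_vgic_map_phys_irq() around the matched lines; the irq_to_desc()/irq_data walk used to find the hardware INTID is filled in from mainline:

static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                            unsigned int host_irq,
                            struct irq_ops *ops)
{
        struct irq_desc *desc;
        struct irq_data *data;

        /* Find the physical IRQ number corresponding to host_irq. */
        desc = irq_to_desc(host_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }
        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = data->hwirq;
        irq->ops = ops;
        return 0;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, struct irq_ops *ops)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
        int ret;

        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return ret;
}
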
544 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
547 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
550 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
551 irq->active = false; in kvm_vgic_reset_mapped_irq()
552 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
553 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
554 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
556 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
561 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
567 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
568 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
570 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
571 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
572 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
573 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
580 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_get_map() local
584 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_get_map()
585 if (irq->hw) in kvm_vgic_get_map()
586 ret = irq->hwintid; in kvm_vgic_get_map()
587 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_get_map()
589 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_get_map()
605 struct vgic_irq *irq; in kvm_vgic_set_owner() local
616 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
617 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
618 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
621 irq->owner = owner; in kvm_vgic_set_owner()
622 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
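
kvm_vgic_set_owner() lets an in-kernel user (such as the arch timer or the PMU) claim a private interrupt so that later injections can be validated against the owner. A sketch; the vgic_initialized() and VGIC_NR_PRIVATE_IRQS checks and the -EAGAIN/-EEXIST error paths are filled in from mainline:

int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* Only private interrupts (SGIs and PPIs) can have an owner. */
        if (intid >= VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        return ret;
}
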
638 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
645 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
649 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
651 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
653 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
660 list_del(&irq->ap_list); in vgic_prune_ap_list()
661 irq->vcpu = NULL; in vgic_prune_ap_list()
662 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
671 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
677 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
683 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
701 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
712 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
715 list_del(&irq->ap_list); in vgic_prune_ap_list()
716 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
717 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
721 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
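
The matched lines from vgic_prune_ap_list() above (file lines 701-717) are its retargeting step: both vcpus' ap_list_locks are taken with the lowest vcpu_id first so the lock ordering stays consistent, and the interrupt is only moved if the oracle still agrees once the locks are re-acquired. A condensed sketch wrapped in a hypothetical helper (vgic_retarget_irq is not a real kernel function; the lock-ordering code is reconstructed from mainline and details may differ):

/* Hypothetical helper condensing the retarget step; not in the kernel. */
static bool vgic_retarget_irq(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu *target_vcpu,
                              struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpuA, *vcpuB;
        bool moved = false;

        /* Always lock the vcpu with the smallest ID first. */
        if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                vcpuA = vcpu;
                vcpuB = target_vcpu;
        } else {
                vcpuA = target_vcpu;
                vcpuB = vcpu;
        }

        raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
        raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                             SINGLE_DEPTH_NESTING);
        raw_spin_lock(&irq->irq_lock);

        /*
         * The state may have changed while the locks were dropped, so only
         * move the interrupt if the oracle still points at target_vcpu.
         */
        if (target_vcpu == vgic_target_oracle(irq)) {
                struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                list_del(&irq->ap_list);
                irq->vcpu = target_vcpu;
                list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                moved = true;
        }

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
        raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

        return moved;
}
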
746 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
748 lockdep_assert_held(&irq->irq_lock); in vgic_populate_lr()
751 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
753 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
777 struct vgic_irq *irq; in compute_ap_list_depth() local
784 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
787 raw_spin_lock(&irq->irq_lock); in compute_ap_list_depth()
789 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
790 raw_spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
802 struct vgic_irq *irq; in vgic_flush_lr_state() local
816 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
817 raw_spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
826 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
827 _raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
831 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
832 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
834 if (irq->source) in vgic_flush_lr_state()
835 prio = irq->priority; in vgic_flush_lr_state()
838 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
841 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
976 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
991 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
992 raw_spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
993 pending = irq_is_pending(irq) && irq->enabled && in kvm_vgic_vcpu_pending_irq()
994 !irq->active && in kvm_vgic_vcpu_pending_irq()
995 irq->priority < vmcr.pmr; in kvm_vgic_vcpu_pending_irq()
996 raw_spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
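
kvm_vgic_vcpu_pending_irq() walks the ap_list and reports whether any queued interrupt could actually fire, i.e. it is pending, enabled, not active and of higher priority than the current PMR. A sketch; the vmcr read and the ap_list_lock handling are filled in from mainline, and mainline's GICv4 doorbell fast path is omitted:

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;
        struct vgic_vmcr vmcr;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        vgic_get_vmcr(vcpu, &vmcr);

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);
                /* Only enabled, non-active interrupts above PMR count. */
                pending = irq_is_pending(irq) && irq->enabled &&
                          !irq->active &&
                          irq->priority < vmcr.pmr;
                raw_spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}
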
1026 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
1033 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
1034 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1035 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
1036 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1037 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()
1062 void vgic_irq_handle_resampling(struct vgic_irq *irq, in vgic_irq_handle_resampling() argument
1065 if (vgic_irq_is_mapped_level(irq)) { in vgic_irq_handle_resampling()
1068 if (unlikely(vgic_irq_needs_resampling(irq))) { in vgic_irq_handle_resampling()
1069 resample = !(irq->active || irq->pending_latch); in vgic_irq_handle_resampling()
1070 } else if (lr_pending || (lr_deactivated && irq->line_level)) { in vgic_irq_handle_resampling()
1071 irq->line_level = vgic_get_phys_line_level(irq); in vgic_irq_handle_resampling()
1072 resample = !irq->line_level; in vgic_irq_handle_resampling()
1076 vgic_irq_set_phys_active(irq, false); in vgic_irq_handle_resampling()
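
vgic_irq_handle_resampling() closes the listing: for a hardware-mapped level interrupt it re-reads the physical line after the guest has handled it and, if the line has dropped (or the IRQ needs explicit resampling and is neither active nor pending), deactivates the physical interrupt so it can fire again. A sketch; the enclosing braces and the resample local are filled in from mainline:

void vgic_irq_handle_resampling(struct vgic_irq *irq,
                                bool lr_deactivated, bool lr_pending)
{
        if (vgic_irq_is_mapped_level(irq)) {
                bool resample = false;

                if (unlikely(vgic_irq_needs_resampling(irq))) {
                        /* Needs software resampling: wait until neither active nor pending. */
                        resample = !(irq->active || irq->pending_latch);
                } else if (lr_pending || (lr_deactivated && irq->line_level)) {
                        /* Re-sample the physical line level. */
                        irq->line_level = vgic_get_phys_line_level(irq);
                        resample = !irq->line_level;
                }

                if (resample)
                        vgic_irq_set_phys_active(irq, false);
        }
}
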