Lines matching refs: prio
137 u8 prio = 0xff; in xive_vm_scan_interrupts() local
149 prio = ffs(pending) - 1; in xive_vm_scan_interrupts()
152 if (prio >= xc->cppr || prio > 7) { in xive_vm_scan_interrupts()
154 prio = xc->mfrr; in xive_vm_scan_interrupts()
161 q = &xc->queues[prio]; in xive_vm_scan_interrupts()
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) { in xive_vm_scan_interrupts()
213 pending &= ~(1 << prio); in xive_vm_scan_interrupts()
234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { in xive_vm_scan_interrupts()
235 prio = xc->mfrr; in xive_vm_scan_interrupts()
272 xc->cppr = prio; in xive_vm_scan_interrupts()
374 u8 pending, prio; in xive_vm_push_pending_to_hw() local
385 prio = ffs(pending) - 1; in xive_vm_push_pending_to_hw()
387 __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING); in xive_vm_push_pending_to_hw()
393 unsigned int prio; in xive_vm_scan_for_rerouted_irqs() local
396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { in xive_vm_scan_for_rerouted_irqs()
397 struct xive_q *q = &xc->queues[prio]; in xive_vm_scan_for_rerouted_irqs()
866 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, in kvmppc_xive_attach_escalation() argument
870 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
875 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
880 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
882 prio, xc->server_num); in kvmppc_xive_attach_escalation()
891 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
894 prio, xc->server_num); in kvmppc_xive_attach_escalation()
899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
905 prio, xc->server_num); in kvmppc_xive_attach_escalation()
908 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
930 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
931 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
936 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) in xive_provision_queue() argument
940 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
951 prio, xc->server_num); in xive_provision_queue()
963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
967 prio, xc->server_num); in xive_provision_queue()
972 static int xive_check_provisioning(struct kvm *kvm, u8 prio) in xive_check_provisioning() argument
982 if (xive->qmap & (1 << prio)) in xive_check_provisioning()
985 pr_devel("Provisioning prio... %d\n", prio); in xive_check_provisioning()
991 rc = xive_provision_queue(vcpu, prio); in xive_check_provisioning()
993 kvmppc_xive_attach_escalation(vcpu, prio, in xive_check_provisioning()
1001 xive->qmap |= (1 << prio); in xive_check_provisioning()
1005 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio) in xive_inc_q_pending() argument
1021 q = &xc->queues[prio]; in xive_inc_q_pending()
1025 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) in xive_try_pick_queue() argument
1036 q = &xc->queues[prio]; in xive_try_pick_queue()
1045 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) in kvmppc_xive_select_target() argument
1058 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in kvmppc_xive_select_target()
1061 rc = xive_try_pick_queue(vcpu, prio); in kvmppc_xive_select_target()
1071 rc = xive_try_pick_queue(vcpu, prio); in kvmppc_xive_select_target()
1074 pr_devel(" found on 0x%x/%d\n", *server, prio); in kvmppc_xive_select_target()
1146 u8 prio) in xive_finish_unmask() argument
1173 state->guest_priority = prio; in xive_finish_unmask()
1185 u32 server, u8 prio) in xive_target_interrupt() argument
1196 rc = kvmppc_xive_select_target(kvm, &server, prio); in xive_target_interrupt()
1217 state->act_priority = prio; in xive_target_interrupt()
1225 prio, state->number); in xive_target_interrupt()
1561 u8 prio; in kvmppc_xive_set_mapped() local
1599 prio = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_set_mapped()
1600 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, in kvmppc_xive_set_mapped()
1634 if (prio != MASKED && !state->old_p) in kvmppc_xive_set_mapped()
1642 state->guest_priority = prio; in kvmppc_xive_set_mapped()
1656 u8 prio; in kvmppc_xive_clr_mapped() local
1674 prio = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_clr_mapped()
1675 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio, in kvmppc_xive_clr_mapped()
1715 if (prio == MASKED || state->old_p) in kvmppc_xive_clr_mapped()
1722 state->guest_priority = prio; in kvmppc_xive_clr_mapped()
2188 u64 val, prio; in xive_get_source() local
2224 prio = state->saved_scan_prio; in xive_get_source()
2226 if (prio == MASKED) { in xive_get_source()
2228 prio = state->saved_priority; in xive_get_source()
2230 val |= prio << KVM_XICS_PRIORITY_SHIFT; in xive_get_source()
2248 if (state->in_queue || (prio == MASKED && state->saved_q)) in xive_get_source()
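Several of the hits above (e.g. source lines 149 and 385) rely on the same idiom: the most favored pending priority is the lowest-numbered set bit of a pending bitmap, recovered with ffs(pending) - 1, and it is only usable when it is strictly more favored than the current CPPR and within the 0..7 range (see source line 152). As a rough illustration only, here is a minimal standalone C sketch of that idiom; the names pick_pending_prio and MASKED_PRIO are hypothetical and not taken from the kernel source, and the sketch is not the kernel's actual scan loop.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>    /* ffs() */

#define MASKED_PRIO 0xff    /* hypothetical "nothing deliverable" marker, mirrors the prio = 0xff initializer above */

/*
 * Return the most favored pending priority (lowest bit number set in
 * the pending bitmap), or MASKED_PRIO when nothing is pending or when
 * the best pending priority is not more favored than the CPPR.
 * In XIVE, numerically lower priority values are more favored.
 */
static uint8_t pick_pending_prio(uint8_t pending, uint8_t cppr)
{
        int prio;

        if (!pending)
                return MASKED_PRIO;

        prio = ffs(pending) - 1;        /* ffs() is 1-based, so bit 0 => priority 0 */
        if (prio >= cppr || prio > 7)   /* only priorities 0..7 exist; CPPR gates delivery */
                return MASKED_PRIO;

        return (uint8_t)prio;
}

int main(void)
{
        /* priorities 3 and 6 pending; a CPPR of 5 lets priorities 0..4 through */
        uint8_t pending = (1u << 3) | (1u << 6);

        printf("picked prio %u\n", pick_pending_prio(pending, 5));     /* -> 3 */
        printf("picked prio %u\n", pick_pending_prio(pending, 2));     /* -> 255 (masked) */
        return 0;
}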