Lines Matching refs:iommu
64 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
73 iommu->name); in intel_svm_enable_prq()
76 iommu->prq = page_address(pages); in intel_svm_enable_prq()
78 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
81 iommu->name); in intel_svm_enable_prq()
85 iommu->pr_irq = irq; in intel_svm_enable_prq()
87 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
88 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
89 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
91 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
95 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
97 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
100 iommu->prq_name, iommu); in intel_svm_enable_prq()
103 iommu->name); in intel_svm_enable_prq()
106 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
107 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
108 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
110 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
115 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
116 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
119 iommu->pr_irq = 0; in intel_svm_enable_prq()
121 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
122 iommu->prq = NULL; in intel_svm_enable_prq()
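
The matches above all fall in intel_svm_enable_prq(), which appears to be the PRQ (page request queue) setup path of the Linux Intel VT-d SVM driver (drivers/iommu/intel/svm.c or equivalent). The sketch below shows how these lines likely fit together: allocate the ring, wire up the interrupt and the IOPF queue, then point the hardware at the ring. It is a non-compilable reconstruction; the alloc_pages_node() call, the error labels and the return codes are inferred from the visible cleanup lines rather than taken from the listing.

    int intel_svm_enable_prq(struct intel_iommu *iommu)
    {
        struct iopf_queue *iopfq;
        struct page *pages;
        int irq, ret;

        /* Backing pages for the hardware page request ring (allocator assumed). */
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages)
            return -ENOMEM;
        iommu->prq = page_address(pages);

        /* Dedicated interrupt vector for page request events. */
        irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id,
                               iommu->node, iommu);
        if (irq <= 0) {
            ret = -EINVAL;
            goto free_prq;
        }
        iommu->pr_irq = irq;

        /* I/O page fault queue that feeds requests to the generic IOPF layer. */
        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
            ret = -ENOMEM;
            goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        /* Threaded handler; prq_event_thread() drains the ring (IRQ flags assumed). */
        snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret)
            goto free_iopfq;

        /* Point the hardware at the ring: reset head/tail, program base + size. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);
        return 0;

    free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
    free_hwirq:
        dmar_free_hwirq(irq);
        iommu->pr_irq = 0;
    free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
    }
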
127 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
129 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
130 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
131 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
133 if (iommu->pr_irq) { in intel_svm_finish_prq()
134 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
135 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
136 iommu->pr_irq = 0; in intel_svm_finish_prq()
139 if (iommu->iopf_queue) { in intel_svm_finish_prq()
140 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
141 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
144 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
145 iommu->prq = NULL; in intel_svm_finish_prq()
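
intel_svm_finish_prq() (lines 127-145) is the mirror-image teardown. A minimal sketch assembling the listed lines, with only the return statement assumed: quiesce the hardware first, then release the IRQ, the IOPF queue and the ring pages.

    int intel_svm_finish_prq(struct intel_iommu *iommu)
    {
        /* Detach the hardware from the ring before freeing anything. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
            free_irq(iommu->pr_irq, iommu);
            dmar_free_hwirq(iommu->pr_irq);
            iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
            iopf_queue_free(iommu->iopf_queue);
            iommu->iopf_queue = NULL;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
    }
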
150 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
152 if (!pasid_supported(iommu)) in intel_svm_check()
156 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
158 iommu->name); in intel_svm_check()
163 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
165 iommu->name); in intel_svm_check()
169 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
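
intel_svm_check() (lines 150-169) decides whether SVM can be enabled on a unit: PASID support is mandatory, and the first-level capability bits must match what the host MMU uses. In the sketch below the two cpu_feature_enabled() guards are recalled from memory and should be treated as assumptions; only the capability checks and the final flag appear in the listing.

    void intel_svm_check(struct intel_iommu *iommu)
    {
        if (!pasid_supported(iommu))
            return;

        /* Assumed guard: if the CPU can map 1GB pages, the IOMMU first level
         * must support them too, or shared page tables would be unusable. */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                   iommu->name);
            return;
        }

        /* Assumed guard: same idea for 5-level paging (LA57). */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_fl5lp_support(iommu->cap)) {
            pr_err("%s SVM disabled, incompatible paging mode\n",
                   iommu->name);
            return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
    }
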
182 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
184 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
228 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0); in intel_flush_svm_all()
230 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in intel_flush_svm_all()
275 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
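
Lines 182-230 show the invalidation pattern used for SVM ranges: a PASID-tagged IOTLB flush in the IOMMU, followed by a device-TLB flush when the endpoint caches translations through ATS; line 275 is the companion teardown of the PASID entry when the mm goes away. A hedged sketch of the range flush, with the info fields (ats_enabled, pfsid) and the qdep handling assumed:

    static void __flush_svm_range_dev(struct intel_svm *svm,
                                      struct intel_svm_dev *sdev,
                                      unsigned long address,
                                      unsigned long pages, int ih)
    {
        struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

        /* First-level (PASID-scoped) IOTLB invalidation inside the IOMMU. */
        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

        /* If the device caches translations via ATS, its device TLB must be
         * invalidated as well, sized to cover the same range. */
        if (info->ats_enabled)
            qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                     svm->pasid, sdev->qdep, address,
                                     order_base_2(pages));
    }
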
318 static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, in intel_svm_bind_mm() argument
359 sdev->iommu = iommu; in intel_svm_bind_mm()
371 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid, in intel_svm_bind_mm()
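
In intel_svm_bind_mm() (lines 318-371) the interesting step is the last listed line: the device's PASID entry is programmed as a first-level translation that points directly at the process page tables, which is what makes the address space "shared". A minimal sketch of that call; FLPT_DEFAULT_DID and the PASID_FLAG_FL5LP flag are assumptions about the elided arguments:

    /* Share mm's page tables with the device at mm->pasid (sketch only). */
    sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
    ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                        FLPT_DEFAULT_DID, sflags);
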
395 struct intel_iommu *iommu; in intel_svm_remove_dev_pasid() local
399 iommu = device_to_iommu(dev, NULL, NULL); in intel_svm_remove_dev_pasid()
400 if (!iommu) in intel_svm_remove_dev_pasid()
483 struct intel_iommu *iommu; in intel_drain_pasid_prq() local
497 iommu = info->iommu; in intel_drain_pasid_prq()
501 did = domain_id_iommu(domain, iommu); in intel_drain_pasid_prq()
509 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
515 req = &iommu->prq[head / sizeof(*req)]; in intel_drain_pasid_prq()
521 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
545 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
546 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_drain_pasid_prq()
547 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_drain_pasid_prq()
548 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
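
intel_drain_pasid_prq() (lines 483-548) drains in-flight page requests for a PASID in two phases. Phase one walks the software-visible ring between head and tail and, if any request for the PASID is still queued, sleeps on prq_complete (which prq_event_thread() signals after each pass) and re-checks. Phase two submits a fenced invalidation-wait plus PASID IOTLB and device-TLB invalidations with QI_OPT_WAIT_DRAIN, retrying while the Page Request Overflow bit is set. A condensed, non-compilable sketch; the descriptor encodings and the goto labels are assumptions:

    prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];
            if (req->pasid_present && req->pasid == pasid) {
                /* Still queued: wait for the handler thread and re-scan. */
                wait_for_completion(&iommu->prq_complete);
                goto prq_retry;
            }
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /*
         * Per the VT-d drain procedure: three descriptors (invalidation wait
         * with fence, PASID-based IOTLB invalidation, device-TLB invalidation)
         * are submitted with the drain option; if an overflow is pending the
         * responses may not have drained yet, so wait and retry.
         */
    qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            wait_for_completion(&iommu->prq_complete);
            goto qi_retry;
        }
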
569 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
602 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
613 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
619 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
649 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
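
handle_bad_prq_event() (lines 613-649) logs the malformed descriptor and, when the request expects a response (last page in group, or private data present), answers it directly with the given failure code through the invalidation queue. A sketch with the page_req_dsc field names taken on assumption and the page-group-response encoding elided:

    static void handle_bad_prq_event(struct intel_iommu *iommu,
                                     struct page_req_dsc *req, int result)
    {
        struct qi_desc desc = {};

        pr_err("%s: Invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /* Requests that do not expect a response are simply dropped. */
        if (!req->lpig && !req->priv_data_present)
            return;

        /* Encode RID, PASID, group index and 'result' into a page group
         * response descriptor here (encoding macros omitted), then post it. */
        qi_submit_sync(iommu, &desc, 1, 0);
    }
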
654 struct intel_iommu *iommu = d; in prq_event_thread() local
664 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
666 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
667 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
670 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
675 iommu->name); in prq_event_thread()
677 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
683 iommu->name); in prq_event_thread()
689 iommu->name); in prq_event_thread()
695 iommu->name); in prq_event_thread()
703 pdev = pci_get_domain_bus_and_slot(iommu->segment, in prq_event_thread()
713 if (intel_svm_prq_report(iommu, &pdev->dev, req)) in prq_event_thread()
714 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
716 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
718 iommu->prq_seq_number++); in prq_event_thread()
724 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
730 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
732 iommu->name); in prq_event_thread()
733 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
734 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
736 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
737 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
739 iommu->name); in prq_event_thread()
743 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
744 complete(&iommu->prq_complete); in prq_event_thread()
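
prq_event_thread() (lines 654-744) is the threaded IRQ handler behind the PRQ: it acknowledges the pending interrupt, walks the ring from head to tail, rejects malformed descriptors, reports the rest as I/O page faults against the requesting PCI device, advances the hardware head, recovers from a queue overflow, and finally wakes any drain waiters. A condensed sketch; the individual sanity checks are collapsed into one, and the middle trace arguments are recalled from memory rather than taken from the listing:

    static irqreturn_t prq_event_thread(int irq, void *d)
    {
        struct intel_iommu *iommu = d;
        struct page_req_dsc *req;
        struct pci_dev *pdev;
        int head, tail, handled;

        /* Ack the Pending Page Request bit first so a new request raises a
         * fresh interrupt while this pass is still running. */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        handled = (head != tail);
        while (head != tail) {
            req = &iommu->prq[head / sizeof(*req)];

            /* Several validity checks collapsed into one here: missing PASID,
             * non-canonical address, privileged or execute requests all get an
             * invalid response via handle_bad_prq_event(). */
            if (!req->pasid_present) {
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                goto prq_advance;
            }

            pdev = pci_get_domain_bus_and_slot(iommu->segment,
                                               PCI_BUS_NUM(req->rid),
                                               req->rid & 0xff);
            if (!pdev) {
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                goto prq_advance;
            }

            /* Hand the fault to the generic IOPF machinery; on failure, answer
             * it ourselves with an invalid response. */
            if (intel_svm_prq_report(iommu, &pdev->dev, req))
                handle_bad_prq_event(iommu, req, QI_RESP_INVALID);

            trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
                             req->priv_data[0], req->priv_data[1],
                             iommu->prq_seq_number++);
            pci_dev_put(pdev);
    prq_advance:
            head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Tell hardware everything up to the old tail has been consumed. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /* Overflow recovery: once the ring is empty, drop any half-built fault
         * groups and clear the Page Request Overflow bit. */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
            head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
            tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
            if (head == tail) {
                iopf_queue_discard_partial(iommu->iopf_queue);
                writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
            }
        }

        /* Wake intel_drain_pasid_prq() and friends waiting on this pass. */
        if (!completion_done(&iommu->prq_complete))
            complete(&iommu->prq_complete);

        return IRQ_RETVAL(handled);
    }
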
754 struct intel_iommu *iommu; in intel_svm_page_response() local
765 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_page_response()
766 if (!iommu) in intel_svm_page_response()
810 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
814 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
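
intel_svm_page_response() (lines 754-814) is the reverse path: when the consumer of a reported fault answers, the driver encodes the response code into a single page-group-response descriptor and posts it on the invalidation queue. If the hardware-private data field was not used by the device, it carried the enqueue timestamp set at line 602, so it is used here to account PRQ handling latency. A trimmed sketch of the tail of the function; prm, msg and private_present are assumed names for the response message fields, and the descriptor encoding is elided:

    struct qi_desc desc = {};

    /* Encode RID/PASID/group index and msg->code into desc.qw0/qw1 with the
     * page-group-response helpers (omitted here for brevity). */

    if (private_present) {
        /* Device-supplied private data must be echoed back verbatim. */
        desc.qw2 = prm->private_data[0];
        desc.qw3 = prm->private_data[1];
    } else if (prm->private_data[0]) {
        /* The field held our enqueue timestamp; account the round trip. */
        dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
                            ktime_to_ns(ktime_get()) - prm->private_data[0]);
    }

    qi_submit_sync(iommu, &desc, 1, 0);
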
824 struct intel_iommu *iommu = info->iommu; in intel_svm_set_dev_pasid() local
827 return intel_svm_bind_mm(iommu, dev, mm); in intel_svm_set_dev_pasid()