Lines Matching refs:iommu
(Symbol cross-reference for `iommu` in the Linux Intel VT-d driver, drivers/iommu/intel/iommu.c. Each entry shows the source line number, the matching code line, and the enclosing function together with the kind of reference: argument, local, or member.)
223 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in context_copied() argument
225 if (!iommu->copied_tables) in context_copied()
228 return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); in context_copied()
232 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in set_context_copied() argument
234 set_bit(((long)bus << 8) | devfn, iommu->copied_tables); in set_context_copied()
238 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_copied() argument
240 clear_bit(((long)bus << 8) | devfn, iommu->copied_tables); in clear_context_copied()
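Note: the copied_tables bitmap above is indexed by PCI source-id, ((bus << 8) | devfn), one bit for each of the 64K possible (bus, devfn) pairs. A minimal userspace sketch of that indexing, with plain C stand-ins for the kernel's test_bit/set_bit/clear_bit:

```c
#include <stdbool.h>
#include <stdint.h>

/* One bit per PCI source-id: 256 buses x 256 devfns = 65536 bits. */
static uint64_t copied_tables[65536 / 64];

static inline long source_id(uint8_t bus, uint8_t devfn)
{
	return ((long)bus << 8) | devfn;	/* same index the driver uses */
}

static inline bool context_copied(uint8_t bus, uint8_t devfn)
{
	long bit = source_id(bus, devfn);
	return copied_tables[bit / 64] & (1ULL << (bit % 64));
}

static inline void set_context_copied(uint8_t bus, uint8_t devfn)
{
	long bit = source_id(bus, devfn);
	copied_tables[bit / 64] |= 1ULL << (bit % 64);
}

static inline void clear_context_copied(uint8_t bus, uint8_t devfn)
{
	long bit = source_id(bus, devfn);
	copied_tables[bit / 64] &= ~(1ULL << (bit % 64));
}
```

The kernel allocates this bitmap lazily (bitmap_zalloc(BIT_ULL(16)) in copy_translation_tables(), line 2695), which is why context_copied() first checks iommu->copied_tables for NULL.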
273 struct intel_iommu *iommu; /* the corresponding iommu */ member
304 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
306 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
309 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
311 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
314 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
318 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
320 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
404 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
408 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
409 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
412 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
416 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
422 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
427 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
439 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
441 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
449 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
451 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
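Note: __iommu_calculate_agaw() picks the largest supported adjusted guest address width (AGAW) no wider than max_gaw, by intersecting first-level and second-level capabilities into a SAGAW bitmask (lines 404-416) and scanning it downward. A sketch of the downward scan, assuming the usual VT-d bit numbering (bit 1 = 39-bit/3-level, bit 2 = 48-bit/4-level, bit 3 = 57-bit/5-level) and modeling the kernel's width_to_agaw() as DIV_ROUND_UP(width - 30, 9):

```c
#include <stdio.h>

static int width_to_agaw(int width)
{
	return (width - 30 + 8) / 9;	/* round up: 39->1, 48->2, 57->3 */
}

/* Highest set bit in sagaw that is <= the agaw needed for max_gaw. */
static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1UL << agaw))
			break;
	return agaw;	/* -1 if the hardware supports nothing that fits */
}

int main(void)
{
	unsigned long sagaw = 1UL << 2;	/* e.g. 4-level (48-bit) support only */

	printf("agaw for 48-bit gaw: %d\n", calculate_agaw(sagaw, 48)); /* 2 */
	printf("agaw for 57-bit gaw: %d\n", calculate_agaw(sagaw, 57)); /* 2 */
	return 0;
}
```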
454 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
456 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
457 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
464 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
471 if (!iommu_paging_structure_coherency(info->iommu)) { in domain_update_iommu_coherency()
481 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
482 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
494 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
502 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
503 if (iommu != skip) { in domain_update_iommu_superpage()
505 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
508 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
591 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
594 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
602 if (!alloc && context_copied(iommu, bus, devfn)) in iommu_context_addr()
606 if (sm_supported(iommu)) { in iommu_context_addr()
620 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_context_addr()
624 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
627 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
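Note: iommu_context_addr() resolves a (bus, devfn) pair in two steps: the root table (one entry per bus) points at a per-bus context table, which is then indexed by devfn; in scalable mode each root entry holds two halves and context entries double in size, which is why the lookup takes devfn rather than a flat index. A simplified legacy-mode model, with a hypothetical one-bit-present entry layout standing in for the hardware format:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical, simplified legacy-mode formats (bit 0: present;
 * upper bits: context-table pointer). */
struct context_entry { uint64_t lo, hi; };
struct root_entry { uint64_t lo; };

static struct root_entry root_table[256];	/* indexed by bus */

static struct context_entry *context_addr(uint8_t bus, uint8_t devfn, int alloc)
{
	struct root_entry *re = &root_table[bus];
	struct context_entry *ctx;

	if (!(re->lo & 1)) {		/* no context table for this bus yet */
		if (!alloc)
			return NULL;
		ctx = calloc(256, sizeof(*ctx));  /* kernel: alloc_pgtable_page() */
		if (!ctx)
			return NULL;
		re->lo = (uint64_t)(uintptr_t)ctx | 1;
	}
	ctx = (struct context_entry *)(uintptr_t)(re->lo & ~1ULL);
	return &ctx[devfn];		/* one entry per (device, function) */
}
```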
689 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
691 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
710 struct intel_iommu *iommu; in device_to_iommu() local
732 for_each_iommu(iommu, drhd) { in device_to_iommu()
766 iommu = NULL; in device_to_iommu()
768 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
769 iommu = NULL; in device_to_iommu()
773 return iommu; in device_to_iommu()
783 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
788 if (!iommu->root_entry) in free_context_table()
792 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
796 if (!sm_supported(iommu)) in free_context_table()
799 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
804 free_pgtable_page(iommu->root_entry); in free_context_table()
805 iommu->root_entry = NULL; in free_context_table()
809 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, in pgtable_walk() argument
833 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
845 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
848 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
854 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
861 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
871 if (!sm_supported(iommu)) { in dmar_fault_dump_ptes()
911 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level); in dmar_fault_dump_ptes()
1189 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1193 root = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
1196 iommu->name); in iommu_alloc_root_entry()
1200 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1201 iommu->root_entry = root; in iommu_alloc_root_entry()
1206 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1212 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1213 if (sm_supported(iommu)) in iommu_set_root_entry()
1216 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1217 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1219 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1222 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1225 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1231 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1234 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1235 if (sm_supported(iommu)) in iommu_set_root_entry()
1236 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1237 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
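Note: iommu_set_root_entry() follows the VT-d "set root table pointer" handshake, performed under register_lock with interrupts off: program DMAR_RTADDR_REG with the root table's physical address, set SRTP in the global command register, then poll global status until hardware reports RTPS; afterwards the context, PASID, and IOTLB caches are globally invalidated unless the hardware self-invalidates on SRTP (cap_esrtps, line 1231). A hedged MMIO-style sketch; the offsets and bit positions below follow the kernel's register definitions but should be treated as illustrative:

```c
#include <stdint.h>

/* Offsets/bits as in the kernel's VT-d register definitions (illustrative). */
#define DMAR_GCMD_REG	0x18
#define DMAR_GSTS_REG	0x1c
#define DMAR_RTADDR_REG	0x20
#define DMA_GCMD_SRTP	(1U << 30)
#define DMA_GSTS_RTPS	(1U << 30)

static inline void writel(uint32_t v, volatile void *a) { *(volatile uint32_t *)a = v; }
static inline uint32_t readl(volatile void *a) { return *(volatile uint32_t *)a; }
static inline void writeq(uint64_t v, volatile void *a) { *(volatile uint64_t *)a = v; }

static void set_root_entry(volatile uint8_t *reg, uint64_t root_phys, uint32_t gcmd)
{
	/* 1. Tell the hardware where the root table lives. */
	writeq(root_phys, reg + DMAR_RTADDR_REG);

	/* 2. Latch it by setting SRTP (preserving already-enabled bits). */
	writel(gcmd | DMA_GCMD_SRTP, reg + DMAR_GCMD_REG);

	/* 3. Wait for the RTPS acknowledgement in the status register
	 * (the kernel's IOMMU_WAIT_OP adds a timeout and cpu_relax()). */
	while (!(readl(reg + DMAR_GSTS_REG) & DMA_GSTS_RTPS))
		;
}
```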
1240 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1245 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1248 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1249 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1252 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1255 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1259 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1279 iommu->name, type); in __iommu_flush_context()
1284 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1285 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1288 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1291 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1295 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1298 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1317 iommu->name, type); in __iommu_flush_iotlb()
1321 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1324 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1327 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1328 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1331 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1334 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
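Note: __iommu_flush_iotlb() is the legacy register-based invalidation path, used when queued invalidation is unavailable (see intel_iommu_init_qi(), line 2553): write an optional invalidation address, then the command word with the IVT bit set, and poll the same register until hardware clears IVT. A minimal model of that poll loop; the register layout comment reflects the driver's ecap_iotlb_offset() usage and the IVT position in the kernel headers:

```c
#include <stdint.h>

#define DMA_TLB_IVT	(1ULL << 63)	/* "invalidate" trigger/busy bit */

static inline void writeq(uint64_t v, volatile void *a) { *(volatile uint64_t *)a = v; }
static inline uint64_t readq(volatile void *a) { return *(volatile uint64_t *)a; }

/*
 * tlb_reg points at the per-IOMMU IOTLB register pair, located via
 * ecap_iotlb_offset() in the real driver: [0] = IVA, [8] = IOTLB command.
 */
static void flush_iotlb(volatile uint8_t *tlb_reg, uint64_t cmd, uint64_t iva,
			int have_iva)
{
	if (have_iva)
		writeq(iva, tlb_reg);		 /* page-selective: address first */
	writeq(cmd | DMA_TLB_IVT, tlb_reg + 8);	 /* kick off the invalidation */

	/* Hardware clears IVT when the flush completes. */
	while (readq(tlb_reg + 8) & DMA_TLB_IVT)
		;
}
```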
1347 struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_lookup_dev_info() argument
1354 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1464 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1489 qi_flush_dev_iotlb_pasid(info->iommu, in iommu_flush_dev_iotlb()
1498 static void domain_flush_pasid_iotlb(struct intel_iommu *iommu, in domain_flush_pasid_iotlb() argument
1502 u16 did = domain_id_iommu(domain, iommu); in domain_flush_pasid_iotlb()
1508 qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih); in domain_flush_pasid_iotlb()
1511 qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih); in domain_flush_pasid_iotlb()
1515 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1523 u16 did = domain_id_iommu(domain, iommu); in iommu_flush_iotlb_psi()
1532 domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1559 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1560 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1561 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1564 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1572 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
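Note: iommu_flush_iotlb_psi() must express an arbitrary (pfn, pages) range as a single aligned power-of-two block, because page-selective invalidation (PSI) encodes the range as a base address plus a count of ignored low address bits; when the resulting mask exceeds cap_max_amask_val() or PSI is unsupported, it falls back to a domain-selective flush (lines 1559-1564). An equivalent formulation of the mask computation, as a sketch (the kernel arrives at the same block via roundup_pow_of_two and shared-bits arithmetic):

```c
#include <stdint.h>

#define VTD_PAGE_SHIFT 12

/* Smallest aligned 2^order block covering [pfn, pfn + pages - 1].
 * Caller guarantees pages >= 1. */
static void psi_range(unsigned long pfn, unsigned long pages,
		      uint64_t *addr, unsigned int *mask)
{
	unsigned long end = pfn + pages - 1;
	unsigned int order = 0;

	/* Grow the block until one aligned 2^order block spans the range. */
	while ((pfn >> order) != (end >> order))
		order++;

	*mask = order;
	*addr = (uint64_t)(pfn >> order) << (order + VTD_PAGE_SHIFT);
}
```

For example, pfn = 3 with pages = 2 straddles a 2-page boundary, so the block grows to order 3 (eight pages starting at pfn 0) rather than under-flushing an aligned 2-page block.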
1577 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1585 if (cap_caching_mode(iommu->cap) && !domain->use_first_level) in __mapping_notify_one()
1586 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1588 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1598 struct intel_iommu *iommu = info->iommu; in intel_flush_iotlb_all() local
1599 u16 did = domain_id_iommu(dmar_domain, iommu); in intel_flush_iotlb_all()
1602 domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1604 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1607 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1612 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1617 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1620 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1621 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1623 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1626 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1629 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1632 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1637 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1638 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1639 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1642 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1645 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1648 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1653 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1654 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1657 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1658 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1659 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1662 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1665 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1668 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1672 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1674 iommu->name, ndomains); in iommu_init_domains()
1676 spin_lock_init(&iommu->lock); in iommu_init_domains()
1678 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1679 if (!iommu->domain_ids) in iommu_init_domains()
1688 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1697 if (sm_supported(iommu)) in iommu_init_domains()
1698 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
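Note: iommu_init_domains() sizes a bitmap to cap_ndoms() and pre-reserves the IDs that must never be handed out dynamically: domain 0 (which hardware may report in faults for non-present entries under caching mode) and, in scalable mode, FLPT_DEFAULT_DID. domain_attach_iommu() (line 1802) later allocates from this bitmap with find_first_zero_bit(). A compact model of the allocator:

```c
#include <stdint.h>
#include <stdio.h>

#define NDOMAINS 256			/* stand-in for cap_ndoms(iommu->cap) */

static uint64_t domain_ids[NDOMAINS / 64];

static void reserve(int id) { domain_ids[id / 64] |= 1ULL << (id % 64); }

/* find_first_zero_bit() analog: lowest unreserved domain id, or -1. */
static int alloc_domain_id(void)
{
	for (int id = 0; id < NDOMAINS; id++) {
		if (!(domain_ids[id / 64] & (1ULL << (id % 64)))) {
			reserve(id);
			return id;
		}
	}
	return -1;			/* kernel: "No free domain ids" */
}

int main(void)
{
	reserve(0);	/* never hand out domain 0 */
	reserve(1);	/* FLPT_DEFAULT_DID, reserved in scalable mode */
	printf("first dynamic did: %d\n", alloc_domain_id());	/* prints 2 */
	return 0;
}
```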
1703 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1705 if (!iommu->domain_ids) in disable_dmar_iommu()
1712 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1716 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1717 iommu_disable_translation(iommu); in disable_dmar_iommu()
1720 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1722 if (iommu->domain_ids) { in free_dmar_iommu()
1723 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1724 iommu->domain_ids = NULL; in free_dmar_iommu()
1727 if (iommu->copied_tables) { in free_dmar_iommu()
1728 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1729 iommu->copied_tables = NULL; in free_dmar_iommu()
1733 free_context_table(iommu); in free_dmar_iommu()
1736 if (pasid_supported(iommu)) { in free_dmar_iommu()
1737 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1738 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1782 struct intel_iommu *iommu) in domain_attach_iommu() argument
1792 spin_lock(&iommu->lock); in domain_attach_iommu()
1793 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1796 spin_unlock(&iommu->lock); in domain_attach_iommu()
1801 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1802 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1804 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1808 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1811 info->iommu = iommu; in domain_attach_iommu()
1812 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1820 spin_unlock(&iommu->lock); in domain_attach_iommu()
1824 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1826 spin_unlock(&iommu->lock); in domain_attach_iommu()
1832 struct intel_iommu *iommu) in domain_detach_iommu() argument
1836 spin_lock(&iommu->lock); in domain_detach_iommu()
1837 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1839 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1840 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1845 spin_unlock(&iommu->lock); in domain_detach_iommu()
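Note: domain_attach_iommu()/domain_detach_iommu() keep one refcounted record per (domain, iommu) pair in domain->iommu_array, an xarray keyed by iommu->seq_id: the first attach allocates a domain ID and installs the record (xa_cmpxchg() re-checks for a racing attach under iommu->lock), later attaches only bump the refcount, and the last detach frees the ID. A sketch of the pattern with a plain array in place of the xarray and a trivial ID allocator:

```c
#include <stdlib.h>

struct iommu_info {
	int did;		/* domain id on this IOMMU */
	int refcnt;		/* devices from this domain on this IOMMU */
};

#define MAX_IOMMUS 16
static struct iommu_info *iommu_array[MAX_IOMMUS];	/* xarray stand-in */

static int next_did = 2;	/* dids 0 and 1 reserved, as above */
static int alloc_domain_id(void) { return next_did++; }	/* bitmap in reality */
static void free_domain_id(int did) { (void)did; }

static int domain_attach(int seq_id)
{
	struct iommu_info *info = iommu_array[seq_id];

	if (info) {			/* already attached: just take a ref */
		info->refcnt++;
		return 0;
	}
	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;
	info->did = alloc_domain_id();
	info->refcnt = 1;
	iommu_array[seq_id] = info;	/* kernel: xa_cmpxchg() under lock */
	return 0;
}

static void domain_detach(int seq_id)
{
	struct iommu_info *info = iommu_array[seq_id];

	if (info && --info->refcnt == 0) {
		free_domain_id(info->did);
		iommu_array[seq_id] = NULL;	/* kernel: xa_erase() */
		free(info);
	}
}
```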
1927 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1932 domain_lookup_dev_info(domain, iommu, bus, devfn); in domain_context_mapping_one()
1933 u16 did = domain_id_iommu(domain, iommu); in domain_context_mapping_one()
1944 spin_lock(&iommu->lock); in domain_context_mapping_one()
1946 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1951 if (context_present(context) && !context_copied(iommu, bus, devfn)) in domain_context_mapping_one()
1963 if (context_copied(iommu, bus, devfn)) { in domain_context_mapping_one()
1966 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
1967 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
1971 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
1975 clear_context_copied(iommu, bus, devfn); in domain_context_mapping_one()
1980 if (sm_supported(iommu)) { in domain_context_mapping_one()
2012 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2032 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2040 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2049 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2050 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2054 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2056 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2062 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2069 struct intel_iommu *iommu; member
2078 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2088 struct intel_iommu *iommu; in domain_context_mapping() local
2091 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2092 if (!iommu) in domain_context_mapping()
2098 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2102 data.iommu = iommu; in domain_context_mapping()
2170 iommu_flush_iotlb_psi(info->iommu, domain, in switch_to_super_page()
2287 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2291 if (!iommu) in domain_context_clear_one()
2294 spin_lock(&iommu->lock); in domain_context_clear_one()
2295 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2297 spin_unlock(&iommu->lock); in domain_context_clear_one()
2301 if (sm_supported(iommu)) { in domain_context_clear_one()
2305 did_old = domain_id_iommu(info->domain, iommu); in domain_context_clear_one()
2311 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2312 spin_unlock(&iommu->lock); in domain_context_clear_one()
2313 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2319 if (sm_supported(iommu)) in domain_context_clear_one()
2320 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2322 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2331 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2344 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2360 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2361 domain_id_iommu(domain, iommu), in domain_setup_first_level()
2449 struct intel_iommu *iommu; in dmar_domain_attach_device() local
2454 iommu = device_to_iommu(dev, &bus, &devfn); in dmar_domain_attach_device()
2455 if (!iommu) in dmar_domain_attach_device()
2458 ret = domain_attach_iommu(domain, iommu); in dmar_domain_attach_device()
2467 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in dmar_domain_attach_device()
2470 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_domain_attach_device()
2473 ret = domain_setup_first_level(iommu, domain, dev, in dmar_domain_attach_device()
2476 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_domain_attach_device()
2492 if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) in dmar_domain_attach_device()
2553 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2561 if (!iommu->qi) { in intel_iommu_init_qi()
2565 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2570 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2573 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2577 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2578 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2580 iommu->name); in intel_iommu_init_qi()
2582 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2583 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2584 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
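Note: intel_iommu_init_qi() selects the invalidation backend through a pair of function pointers: if queued invalidation cannot be enabled it falls back to the register-based __iommu_flush_context()/__iommu_flush_iotlb(), otherwise it wires up qi_flush_context()/qi_flush_iotlb(). The same strategy-struct shape, sketched with no-op stubs (the real callbacks take the iommu, domain id, source id, and flush granularity):

```c
#include <stdbool.h>
#include <stdio.h>

struct iommu_flush {
	void (*flush_context)(void);
	void (*flush_iotlb)(void);
};

static void reg_flush_context(void) { puts("register-based context flush"); }
static void reg_flush_iotlb(void)   { puts("register-based iotlb flush"); }
static void qi_flush_context(void)  { puts("queued context invalidation"); }
static void qi_flush_iotlb(void)    { puts("queued iotlb invalidation"); }

static void init_qi(struct iommu_flush *f, bool qi_ok)
{
	if (!qi_ok) {			/* dmar_enable_qi() failed */
		f->flush_context = reg_flush_context;
		f->flush_iotlb = reg_flush_iotlb;
	} else {
		f->flush_context = qi_flush_context;
		f->flush_iotlb = qi_flush_iotlb;
	}
}
```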
2588 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2610 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2640 new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL); in copy_context_table()
2654 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2655 set_bit(did, iommu->domain_ids); in copy_context_table()
2657 set_context_copied(iommu, bus, devfn); in copy_context_table()
2663 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2672 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
2682 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2684 new_ext = !!sm_supported(iommu); in copy_translation_tables()
2695 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2696 if (!iommu->copied_tables) in copy_translation_tables()
2715 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
2719 iommu->name, bus); in copy_translation_tables()
2724 spin_lock(&iommu->lock); in copy_translation_tables()
2733 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2740 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2743 spin_unlock(&iommu->lock); in copy_translation_tables()
2747 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2760 struct intel_iommu *iommu; in init_dmars() local
2767 for_each_iommu(iommu, drhd) { in init_dmars()
2769 iommu_disable_translation(iommu); in init_dmars()
2778 if (pasid_supported(iommu)) { in init_dmars()
2779 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2785 intel_iommu_init_qi(iommu); in init_dmars()
2787 ret = iommu_init_domains(iommu); in init_dmars()
2791 init_translation_status(iommu); in init_dmars()
2793 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
2794 iommu_disable_translation(iommu); in init_dmars()
2795 clear_translation_pre_enabled(iommu); in init_dmars()
2797 iommu->name); in init_dmars()
2805 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2809 if (translation_pre_enabled(iommu)) { in init_dmars()
2812 ret = copy_translation_tables(iommu); in init_dmars()
2824 iommu->name); in init_dmars()
2825 iommu_disable_translation(iommu); in init_dmars()
2826 clear_translation_pre_enabled(iommu); in init_dmars()
2829 iommu->name); in init_dmars()
2833 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2835 intel_svm_check(iommu); in init_dmars()
2843 for_each_active_iommu(iommu, drhd) { in init_dmars()
2844 iommu_flush_write_buffer(iommu); in init_dmars()
2845 iommu_set_root_entry(iommu); in init_dmars()
2868 for_each_iommu(iommu, drhd) { in init_dmars()
2875 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2879 iommu_flush_write_buffer(iommu); in init_dmars()
2882 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
2888 ret = intel_svm_enable_prq(iommu); in init_dmars()
2894 ret = dmar_set_interrupt(iommu); in init_dmars()
2902 for_each_active_iommu(iommu, drhd) { in init_dmars()
2903 disable_dmar_iommu(iommu); in init_dmars()
2904 free_dmar_iommu(iommu); in init_dmars()
2954 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
2957 for_each_active_iommu(iommu, drhd) { in init_iommu_hw()
2958 if (iommu->qi) { in init_iommu_hw()
2959 ret = dmar_reenable_qi(iommu); in init_iommu_hw()
2965 for_each_iommu(iommu, drhd) { in init_iommu_hw()
2972 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2976 iommu_flush_write_buffer(iommu); in init_iommu_hw()
2977 iommu_set_root_entry(iommu); in init_iommu_hw()
2978 iommu_enable_translation(iommu); in init_iommu_hw()
2979 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2988 struct intel_iommu *iommu; in iommu_flush_all() local
2990 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
2991 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2993 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3001 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3006 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3007 iommu_disable_translation(iommu); in iommu_suspend()
3009 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3011 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3012 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3013 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3014 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3015 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3016 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3017 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3018 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3020 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
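Note: iommu_suspend()/iommu_resume() preserve only the four fault-event programming registers (control, data, address, upper address) in iommu->iommu_state across a sleep state; translation itself is re-established by init_iommu_hw(). A table-driven save/restore sketch, with illustrative offsets for the DMAR_FE*_REG set:

```c
#include <stdint.h>

/* Fault-event register offsets (illustrative): FECTL, FEDATA, FEADDR, FEUADDR. */
static const unsigned int fe_regs[] = { 0x38, 0x3c, 0x40, 0x44 };
#define NR_FE_REGS (sizeof(fe_regs) / sizeof(fe_regs[0]))

static inline uint32_t readl(volatile void *a) { return *(volatile uint32_t *)a; }
static inline void writel(uint32_t v, volatile void *a) { *(volatile uint32_t *)a = v; }

static void suspend_fe(volatile uint8_t *reg, uint32_t state[NR_FE_REGS])
{
	for (unsigned int i = 0; i < NR_FE_REGS; i++)
		state[i] = readl(reg + fe_regs[i]);	/* under register_lock */
}

static void resume_fe(volatile uint8_t *reg, const uint32_t state[NR_FE_REGS])
{
	for (unsigned int i = 0; i < NR_FE_REGS; i++)
		writel(state[i], reg + fe_regs[i]);
}
```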
3028 struct intel_iommu *iommu = NULL; in iommu_resume() local
3039 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3041 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3043 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3044 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3045 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3046 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3047 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3048 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3049 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3050 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3052 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3278 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3280 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3284 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3286 iommu->name); in intel_iommu_add()
3290 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3291 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3293 iommu->name); in intel_iommu_add()
3300 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3301 iommu_disable_translation(iommu); in intel_iommu_add()
3303 ret = iommu_init_domains(iommu); in intel_iommu_add()
3305 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3309 intel_svm_check(iommu); in intel_iommu_add()
3316 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3320 intel_iommu_init_qi(iommu); in intel_iommu_add()
3321 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3324 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3325 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3330 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3334 iommu_set_root_entry(iommu); in intel_iommu_add()
3335 iommu_enable_translation(iommu); in intel_iommu_add()
3337 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3341 disable_dmar_iommu(iommu); in intel_iommu_add()
3343 free_dmar_iommu(iommu); in intel_iommu_add()
3350 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3354 if (iommu == NULL) in dmar_iommu_hotplug()
3360 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3361 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
3414 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) in dmar_ats_supported() argument
3434 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
3562 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
3568 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
3569 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
3588 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
3591 for_each_iommu(iommu, drhd) in intel_disable_iommus()
3592 iommu_disable_translation(iommu); in intel_disable_iommus()
3598 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
3606 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
3607 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
3619 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
3625 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
3626 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
3635 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
3636 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
3643 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
3644 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
3651 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
3652 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
3659 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
3660 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
3667 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
3669 bitmap_weight(iommu->domain_ids, in domains_used_show()
3670 cap_ndoms(iommu->cap))); in domains_used_show()
3732 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
3736 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
3781 struct intel_iommu *iommu; in intel_iommu_init() local
3826 for_each_iommu(iommu, drhd) in intel_iommu_init()
3827 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3862 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
3870 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3875 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3877 "%s", iommu->name); in intel_iommu_init()
3878 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3880 iommu_pmu_register(iommu); in intel_iommu_init()
3892 for_each_iommu(iommu, drhd) { in intel_iommu_init()
3893 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3894 iommu_enable_translation(iommu); in intel_iommu_init()
3896 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3939 struct intel_iommu *iommu = info->iommu; in dmar_remove_one_dev_info() local
3943 if (dev_is_pci(info->dev) && sm_supported(iommu)) in dmar_remove_one_dev_info()
3944 intel_pasid_tear_down_entry(iommu, info->dev, in dmar_remove_one_dev_info()
3955 domain_detach_iommu(domain, iommu); in dmar_remove_one_dev_info()
3967 struct intel_iommu *iommu = info->iommu; in device_block_translation() local
3972 if (sm_supported(iommu)) in device_block_translation()
3973 intel_pasid_tear_down_entry(iommu, dev, in device_block_translation()
3986 domain_detach_iommu(info->domain, iommu); in device_block_translation()
4074 struct intel_iommu *iommu; in prepare_domain_attach_device() local
4077 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
4078 if (!iommu) in prepare_domain_attach_device()
4081 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in prepare_domain_attach_device()
4085 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4086 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4087 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4096 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
4245 iommu_flush_iotlb_psi(info->iommu, dmar_domain, in intel_iommu_tlb_sync()
4277 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
4301 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
4338 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
4348 struct intel_iommu *iommu; in intel_iommu_probe_device() local
4352 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_probe_device()
4353 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
4367 info->segment = iommu->segment; in intel_iommu_probe_device()
4371 info->iommu = iommu; in intel_iommu_probe_device()
4373 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
4375 dmar_ats_supported(pdev, iommu)) { in intel_iommu_probe_device()
4386 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
4390 if (sm_supported(iommu)) { in intel_iommu_probe_device()
4391 if (pasid_supported(iommu)) { in intel_iommu_probe_device()
4398 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
4406 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in intel_iommu_probe_device()
4416 return &iommu->iommu; in intel_iommu_probe_device()
4505 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
4510 iommu = info->iommu; in intel_iommu_enable_sva()
4511 if (!iommu) in intel_iommu_enable_sva()
4514 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
4541 struct intel_iommu *iommu; in intel_iommu_enable_iopf() local
4550 iommu = info->iommu; in intel_iommu_enable_iopf()
4551 if (!iommu) in intel_iommu_enable_iopf()
4562 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4580 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4588 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf() local
4610 WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev)); in intel_iommu_disable_iopf()
4649 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4679 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()
4684 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_remove_dev_pasid() local
4716 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_remove_dev_pasid()
4719 intel_pasid_tear_down_entry(iommu, dev, pasid, false); in intel_iommu_remove_dev_pasid()
4728 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid() local
4733 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) in intel_iommu_set_dev_pasid()
4736 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4747 ret = domain_attach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4752 ret = intel_pasid_setup_pass_through(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4755 ret = domain_setup_first_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4758 ret = intel_pasid_setup_second_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4771 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4780 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info() local
4787 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4788 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
5052 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5055 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5074 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob) in ecmd_submit_sync() argument
5080 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
5083 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
5085 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
5098 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
5099 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
5101 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq, in ecmd_submit_sync()
5111 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()
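Note: ecmd_submit_sync() drives the VT-d Enhanced Command interface under register_lock: verify cap_ecmds(), check that no command is in flight via the response register, write operand B to DMAR_ECEO_REG, write the command with its operand-A field to DMAR_ECMD_REG, then poll the response register until the in-progress flag clears. A sketch of that sequence; the bit positions below are placeholders, so consult the VT-d specification for the real encodings:

```c
#include <errno.h>
#include <stdint.h>

/* Illustrative placeholders for the ECRSP "in progress" flag and the
 * command word's operand-A field shift. */
#define DMA_ECMD_ECRSP_IP	(1ULL << 0)
#define DMA_ECMD_OA_SHIFT	16

static inline uint64_t readq(volatile void *a) { return *(volatile uint64_t *)a; }
static inline void writeq(uint64_t v, volatile void *a) { *(volatile uint64_t *)a = v; }

static int ecmd_submit(volatile uint8_t *ecrsp, volatile uint8_t *ecmd_reg,
		       volatile uint8_t *eceo, uint8_t ecmd, uint64_t oa, uint64_t ob)
{
	if (readq(ecrsp) & DMA_ECMD_ECRSP_IP)	/* previous command in flight */
		return -EBUSY;

	writeq(ob, eceo);			/* operand B first */
	writeq(ecmd | (oa << DMA_ECMD_OA_SHIFT), ecmd_reg);

	while (readq(ecrsp) & DMA_ECMD_ECRSP_IP)	/* kernel adds a timeout */
		;
	return 0;				/* status is then read from ECRSP */
}
```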