Lines matching refs: qi
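
The matches below appear to come from the Intel VT-d queued-invalidation (QI) path: a ring of invalidation descriptors plus a parallel per-slot status array, hanging off iommu->qi and guarded by q_lock. As a reading aid, here is a standalone model of the bookkeeping the matched lines manipulate. The field names mirror the references in the listing, but the concrete types, the slot count, and the QI_* values are illustrative assumptions, not the kernel's exact definitions.

    /* Standalone model of the QI ring bookkeeping (illustrative only). */
    #include <stdint.h>

    #define QI_LENGTH 256                  /* descriptor slots; assumed value */

    enum qi_status { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

    struct qi_desc_model {                 /* one invalidation descriptor */
        uint64_t qw0, qw1, qw2, qw3;       /* 256-bit worst case */
    };

    struct q_inval_model {
        struct qi_desc_model *desc;        /* descriptor ring (page-backed) */
        int *desc_status;                  /* per-slot qi_status value */
        int free_head;                     /* next slot handed to a caller */
        int free_tail;                     /* oldest slot not yet reclaimed */
        int free_cnt;                      /* number of free slots */
    };
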
1159 if (iommu->qi) { in free_iommu()
1160 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1161 kfree(iommu->qi->desc_status); in free_iommu()
1162 kfree(iommu->qi); in free_iommu()
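
The free_iommu() matches are the teardown side: they release the three allocations made when the queue was created, i.e. the descriptor page, the status array, and the q_inval structure itself. A minimal sketch of the same unwinding against the model above, with free() standing in for free_page()/kfree():

    #include <stdlib.h>

    /* Mirror of the free_iommu() unwinding, using the model above. */
    static void free_qi_model(struct q_inval_model **qip)
    {
        struct q_inval_model *qi = *qip;

        if (!qi)
            return;
        free(qi->desc);        /* free_page() on the descriptor ring */
        free(qi->desc_status); /* kfree() on the status array */
        free(qi);              /* kfree() on the bookkeeping itself */
        *qip = NULL;           /* defensive; model only */
    }
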
1175 static inline void reclaim_free_desc(struct q_inval *qi) in reclaim_free_desc() argument
1177 while (qi->desc_status[qi->free_tail] == QI_DONE || in reclaim_free_desc()
1178 qi->desc_status[qi->free_tail] == QI_ABORT) { in reclaim_free_desc()
1179 qi->desc_status[qi->free_tail] = QI_FREE; in reclaim_free_desc()
1180 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; in reclaim_free_desc()
1181 qi->free_cnt++; in reclaim_free_desc()
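
reclaim_free_desc() advances free_tail over every slot whose status has reached QI_DONE or QI_ABORT, returning each one to the free pool, and stops at the first slot still in flight; a slot that never completes therefore holds up reclamation of every later slot. The loop, restated against the model above:

    /* Model of reclaim_free_desc(): recycle completed or aborted slots. */
    static void reclaim_free_desc_model(struct q_inval_model *qi)
    {
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
            qi->desc_status[qi->free_tail] = QI_FREE;
            qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
            qi->free_cnt++;
        }
    }
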
1215 struct qi_desc *desc = iommu->qi->desc + head; in qi_dump_fault()
1234 desc = iommu->qi->desc + head; in qi_dump_fault()
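
The head used in qi_dump_fault() is a byte offset into the ring (it comes back from the hardware head register in that form), which is why the descriptor is reached as qi->desc + head, while slot indices elsewhere are scaled up with `<< shift` (see the wait_index and free_head arithmetic further down). Two hedged helpers making that index/offset conversion explicit; the shift of 4 for 128-bit descriptors and 5 for 256-bit descriptors is an assumption stated for illustration:

    /* Conversion between a slot index and its byte offset in the ring.
     * shift is assumed to be 4 for 128-bit descriptors and 5 for
     * 256-bit descriptors, matching the `1 << shift` copies in the
     * listing.
     */
    static inline unsigned int qi_index_to_offset(unsigned int index,
                                                  unsigned int shift)
    {
        return index << shift;                /* e.g. free_head << shift */
    }

    static inline unsigned int qi_offset_to_index(unsigned int offset,
                                                  unsigned int shift)
    {
        return (offset >> shift) % QI_LENGTH; /* register value -> slot */
    }
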
1246 struct q_inval *qi = iommu->qi; in qi_check_fault() local
1249 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
1264 struct qi_desc *desc = qi->desc + head; in qi_check_fault()
1271 memcpy(desc, qi->desc + (wait_index << shift), in qi_check_fault()
1294 if (qi->desc_status[head] == QI_IN_USE) in qi_check_fault()
1295 qi->desc_status[head] = QI_ABORT; in qi_check_fault()
1299 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
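
The qi_check_fault() matches show the error half of the slot state machine: on an invalidation queue error the wait descriptor is copied back over the slot at the hardware head (presumably so hardware has something harmless to retry), and on timeout-style faults slots still marked QI_IN_USE are flipped to QI_ABORT so the submitter polling the wait slot can bail out. Summarised as a slot life cycle, with a tiny predicate of the kind the wait/fault checks rely on; this is a summary of what the matches show, not the complete fault logic:

    /* Slot life cycle as visible in these matches:
     *
     *   QI_FREE    -> QI_IN_USE  qi_submit_sync() copies a descriptor in
     *   QI_IN_USE  -> QI_DONE    hardware writes the wait slot's status
     *                            word (its address travels in wait qw1);
     *                            the other slots are marked by software
     *                            once the wait completes
     *   QI_IN_USE  -> QI_ABORT   qi_check_fault() on a fault/timeout
     *   QI_DONE or QI_ABORT -> QI_FREE   via reclaim_free_desc()
     */
    static int qi_slot_finished(const struct q_inval_model *qi, int slot)
    {
        int s = qi->desc_status[slot];

        return s == QI_DONE || s == QI_ABORT;  /* a finished slot */
    }
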
1321 struct q_inval *qi = iommu->qi; in qi_submit_sync() local
1332 if (!qi) in qi_submit_sync()
1352 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1358 while (qi->free_cnt < count + 2) { in qi_submit_sync()
1359 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
1361 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1364 index = qi->free_head; in qi_submit_sync()
1370 memcpy(qi->desc + offset, &desc[i], 1 << shift); in qi_submit_sync()
1371 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE; in qi_submit_sync()
1375 qi->desc_status[wait_index] = QI_IN_USE; in qi_submit_sync()
1381 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]); in qi_submit_sync()
1386 memcpy(qi->desc + offset, &wait_desc, 1 << shift); in qi_submit_sync()
1388 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH; in qi_submit_sync()
1389 qi->free_cnt -= count + 1; in qi_submit_sync()
1395 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1397 while (qi->desc_status[wait_index] != QI_DONE) { in qi_submit_sync()
1409 raw_spin_unlock(&qi->q_lock); in qi_submit_sync()
1411 raw_spin_lock(&qi->q_lock); in qi_submit_sync()
1415 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE; in qi_submit_sync()
1417 reclaim_free_desc(qi); in qi_submit_sync()
1418 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
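
Taken together, the qi_submit_sync() matches describe the submit path: under q_lock, wait until at least count + 2 slots are free (one more than the count + 1 actually consumed, per the check in the listing), copy the count descriptors into the ring marking each slot QI_IN_USE, append a wait descriptor whose qw1 carries the physical address of its own status slot, advance free_head by count + 1, write the new free_head (shifted into a byte offset) to the hardware tail register DMAR_IQT_REG, then spin until the wait slot reads QI_DONE (dropping the lock around the fault checks), mark the submitted slots QI_DONE and reclaim. A self-contained model of that flow, reusing the structures and helpers sketched above; the hw_* stubs stand in for the MMIO doorbell and for the IOMMU's asynchronous status write, and the locking and fault handling are deliberately reduced to comments:

    /* Stand-in for the tail-register doorbell write. */
    static void hw_ring_doorbell(unsigned int tail_byte_offset)
    {
        (void)tail_byte_offset;
    }

    /* Stand-in for the IOMMU completing the wait descriptor: it writes
     * QI_DONE through the address stored in the descriptor's qw1. */
    static void hw_complete_wait(struct q_inval_model *qi, int wait_index)
    {
        int *status = (int *)(uintptr_t)qi->desc[wait_index].qw1;

        *status = QI_DONE;
    }

    /* Model of qi_submit_sync(): count descriptors plus one wait slot. */
    static int qi_submit_sync_model(struct q_inval_model *qi,
                                    const struct qi_desc_model *desc,
                                    int count)
    {
        int index, wait_index, i;

        /* The driver drops q_lock, relaxes and retries here instead. */
        if (qi->free_cnt < count + 2)
            return -1;

        index = qi->free_head;
        wait_index = (index + count) % QI_LENGTH;

        for (i = 0; i < count; i++) {
            qi->desc[(index + i) % QI_LENGTH] = desc[i];
            qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
        }

        /* Wait descriptor: its qw1 names its own status slot so the
         * hardware can flip it to QI_DONE once everything queued
         * before it has been executed. */
        qi->desc_status[wait_index] = QI_IN_USE;
        qi->desc[wait_index].qw1 =
            (uint64_t)(uintptr_t)&qi->desc_status[wait_index];

        qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
        qi->free_cnt -= count + 1;

        hw_ring_doorbell(qi_index_to_offset(qi->free_head, 4));

        /* The IOMMU does this asynchronously; faked here so the poll
         * below terminates if the model is actually run. */
        hw_complete_wait(qi, wait_index);

        while (qi->desc_status[wait_index] != QI_DONE)
            ;   /* the driver runs its fault check and cpu_relax() here */

        for (i = 0; i < count; i++)
            qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;

        reclaim_free_desc_model(qi);
        return 0;
    }
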
1651 struct q_inval *qi = iommu->qi; in __dmar_enable_qi() local
1652 u64 val = virt_to_phys(qi->desc); in __dmar_enable_qi()
1654 qi->free_head = qi->free_tail = 0; in __dmar_enable_qi()
1655 qi->free_cnt = QI_LENGTH; in __dmar_enable_qi()
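
__dmar_enable_qi() is the (re)arm step: it resets the ring to its empty state and hands the physical address of qi->desc to the hardware as the queue base. Only the bookkeeping reset is visible in these matches, so the register programming is reduced to a stub in the sketch below:

    /* Stand-in for programming the queue base / enable bits (the exact
     * register sequence is outside these matches). */
    static void hw_set_queue_base(uint64_t desc_phys)
    {
        (void)desc_phys;
    }

    /* Model of __dmar_enable_qi(): empty ring, then point hardware at it. */
    static void enable_qi_hw_model(struct q_inval_model *qi, uint64_t desc_phys)
    {
        qi->free_head = qi->free_tail = 0;  /* nothing queued */
        qi->free_cnt = QI_LENGTH;           /* every slot free */

        hw_set_queue_base(desc_phys);       /* val = virt_to_phys(qi->desc) */
    }
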
1687 struct q_inval *qi; in dmar_enable_qi() local
1696 if (iommu->qi) in dmar_enable_qi()
1699 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1700 if (!iommu->qi) in dmar_enable_qi()
1703 qi = iommu->qi; in dmar_enable_qi()
1712 kfree(qi); in dmar_enable_qi()
1713 iommu->qi = NULL; in dmar_enable_qi()
1717 qi->desc = page_address(desc_page); in dmar_enable_qi()
1719 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); in dmar_enable_qi()
1720 if (!qi->desc_status) { in dmar_enable_qi()
1721 free_page((unsigned long) qi->desc); in dmar_enable_qi()
1722 kfree(qi); in dmar_enable_qi()
1723 iommu->qi = NULL; in dmar_enable_qi()
1727 raw_spin_lock_init(&qi->q_lock); in dmar_enable_qi()
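
dmar_enable_qi() is the constructor: it allocates the q_inval bookkeeping, the page(s) backing the descriptor ring, and the QI_LENGTH-element status array, unwinding in the order the error paths above show when a step fails, and finally initialises q_lock before arming the hardware. A sketch of that construction against the model, with malloc()/calloc() standing in for the kernel allocators:

    /* Model of dmar_enable_qi()'s allocation path and error unwinding. */
    static struct q_inval_model *alloc_qi_model(void)
    {
        struct q_inval_model *qi;

        qi = malloc(sizeof(*qi));                        /* kmalloc(sizeof(*qi)) */
        if (!qi)
            return NULL;

        qi->desc = calloc(QI_LENGTH, sizeof(*qi->desc)); /* descriptor page(s) */
        if (!qi->desc) {
            free(qi);                                    /* kfree(qi); iommu->qi = NULL */
            return NULL;
        }

        qi->desc_status = calloc(QI_LENGTH, sizeof(int));/* kcalloc(QI_LENGTH, ...) */
        if (!qi->desc_status) {
            free(qi->desc);                              /* free_page(qi->desc) */
            free(qi);                                    /* kfree(qi); iommu->qi = NULL */
            return NULL;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;
        /* raw_spin_lock_init(&qi->q_lock) happens here in the driver. */
        return qi;
    }
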
2090 if (!iommu->qi) in dmar_reenable_qi()