Lines matching references to iter (source: arch/x86/kvm/mmu/tdp_mmu.c, KVM's TDP MMU). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" indicate how iter is declared in that function.

207 struct tdp_iter *iter) in tdp_mmu_init_child_sp() argument
212 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_init_child_sp()
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp()
529 struct tdp_iter *iter, in tdp_mmu_set_spte_atomic() argument
532 u64 *sptep = rcu_dereference(iter->sptep); in tdp_mmu_set_spte_atomic()
540 WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte)); in tdp_mmu_set_spte_atomic()
551 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) in tdp_mmu_set_spte_atomic()
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_set_spte_atomic()
555 new_spte, iter->level, true); in tdp_mmu_set_spte_atomic()
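
Note: the tdp_mmu_set_spte_atomic() lines above are the lockless update path: the new SPTE is installed only if the slot still holds the value the iterator last read, otherwise the caller gets -EBUSY and must retry. A minimal user-space sketch of that compare-and-exchange pattern (fake_sptep and fake_set_spte_atomic are made-up names, not kernel API):

    /* Compile with: cc -std=c11 example.c */
    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t fake_sptep;   /* stands in for the PTE slot */

    /* Like try_cmpxchg64() in the listing: on failure, *old_spte is
     * refreshed with the value actually found in the slot so the caller
     * can re-evaluate before retrying.
     */
    static int fake_set_spte_atomic(uint64_t *old_spte, uint64_t new_spte)
    {
        if (!atomic_compare_exchange_strong(&fake_sptep, old_spte, new_spte))
            return -EBUSY;
        return 0;
    }

    int main(void)
    {
        uint64_t old = 0;

        if (fake_set_spte_atomic(&old, 0x123) == 0)
            printf("installed %#x, old was %#lx\n", 0x123, (unsigned long)old);
        return 0;
    }
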
561 struct tdp_iter *iter) in tdp_mmu_zap_spte_atomic() argument
571 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE); in tdp_mmu_zap_spte_atomic()
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level); in tdp_mmu_zap_spte_atomic()
584 __kvm_tdp_mmu_write_spte(iter->sptep, 0); in tdp_mmu_zap_spte_atomic()
623 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_iter_set_spte() argument
626 WARN_ON_ONCE(iter->yielded); in tdp_mmu_iter_set_spte()
627 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, in tdp_mmu_iter_set_spte()
628 iter->old_spte, new_spte, in tdp_mmu_iter_set_spte()
629 iter->gfn, iter->level); in tdp_mmu_iter_set_spte()
660 struct tdp_iter *iter, in tdp_mmu_iter_cond_resched() argument
663 WARN_ON_ONCE(iter->yielded); in tdp_mmu_iter_cond_resched()
666 if (iter->next_last_level_gfn == iter->yielded_gfn) in tdp_mmu_iter_cond_resched()
682 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn); in tdp_mmu_iter_cond_resched()
684 iter->yielded = true; in tdp_mmu_iter_cond_resched()
687 return iter->yielded; in tdp_mmu_iter_cond_resched()
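
Note: tdp_mmu_iter_cond_resched() above implements the yield protocol for long walks: never yield twice without forward progress, and report the yield through iter->yielded so the enclosing for_each_* loop restarts from the next unvisited GFN. An illustrative user-space model, assuming made-up names (fake_iter, fake_cond_resched):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_iter {
        unsigned long next_gfn;     /* next GFN the walk will visit     */
        unsigned long yielded_gfn;  /* next_gfn recorded at last yield  */
        bool yielded;
    };

    static bool fake_cond_resched(struct fake_iter *it, bool need_resched)
    {
        /* No progress since the last yield: yielding again would just
         * restart the walk at the same position, so refuse.
         */
        if (it->next_gfn == it->yielded_gfn)
            return false;
        if (!need_resched)
            return false;
        /* The real code drops mmu_lock and calls cond_resched() here. */
        it->yielded_gfn = it->next_gfn;
        it->yielded = true;
        return it->yielded;
    }

    int main(void)
    {
        struct fake_iter it = { .next_gfn = 4, .yielded_gfn = 0 };

        printf("yielded: %d\n", fake_cond_resched(&it, true));  /* 1 */
        it.yielded = false;
        printf("yielded: %d\n", fake_cond_resched(&it, true));  /* 0, no progress */
        return 0;
    }
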
704 struct tdp_iter iter; in __tdp_mmu_zap_root() local
709 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { in __tdp_mmu_zap_root()
711 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in __tdp_mmu_zap_root()
714 if (!is_shadow_present_pte(iter.old_spte)) in __tdp_mmu_zap_root()
717 if (iter.level > zap_level) in __tdp_mmu_zap_root()
721 tdp_mmu_iter_set_spte(kvm, &iter, 0); in __tdp_mmu_zap_root()
722 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) in __tdp_mmu_zap_root()
794 struct tdp_iter iter; in tdp_mmu_zap_leafs() local
802 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { in tdp_mmu_zap_leafs()
804 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { in tdp_mmu_zap_leafs()
809 if (!is_shadow_present_pte(iter.old_spte) || in tdp_mmu_zap_leafs()
810 !is_last_spte(iter.old_spte, iter.level)) in tdp_mmu_zap_leafs()
813 tdp_mmu_iter_set_spte(kvm, &iter, 0); in tdp_mmu_zap_leafs()
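
Note: __tdp_mmu_zap_root() and tdp_mmu_zap_leafs() above share the same walk skeleton: optionally yield, skip entries that are not present (or not at the wanted level), and clear the rest. A toy user-space model of that skeleton, with a made-up SPTE_PRESENT flag:

    #include <stdint.h>
    #include <stdio.h>

    #define SPTE_PRESENT (1ull << 0)

    static uint64_t fake_pt[16];    /* one flat "page table" for the example */

    static void fake_zap_leafs(unsigned long start, unsigned long end)
    {
        for (unsigned long gfn = start; gfn < end; gfn++) {
            /* The real walk may yield here via tdp_mmu_iter_cond_resched(). */
            if (!(fake_pt[gfn] & SPTE_PRESENT))
                continue;           /* nothing mapped at this GFN */
            fake_pt[gfn] = 0;       /* zap the leaf mapping       */
        }
    }

    int main(void)
    {
        fake_pt[3] = SPTE_PRESENT | 0x1000;
        fake_zap_leafs(0, 16);
        printf("entry 3 after zap: %#llx\n", (unsigned long long)fake_pt[3]);
        return 0;
    }
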
954 struct tdp_iter *iter) in tdp_mmu_map_handle_target_level() argument
956 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
965 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); in tdp_mmu_map_handle_target_level()
967 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn, in tdp_mmu_map_handle_target_level()
968 fault->pfn, iter->old_spte, fault->prefetch, true, in tdp_mmu_map_handle_target_level()
971 if (new_spte == iter->old_spte) in tdp_mmu_map_handle_target_level()
973 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) in tdp_mmu_map_handle_target_level()
975 else if (is_shadow_present_pte(iter->old_spte) && in tdp_mmu_map_handle_target_level()
976 !is_last_spte(iter->old_spte, iter->level)) in tdp_mmu_map_handle_target_level()
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); in tdp_mmu_map_handle_target_level()
992 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn, in tdp_mmu_map_handle_target_level()
996 trace_kvm_mmu_set_spte(iter->level, iter->gfn, in tdp_mmu_map_handle_target_level()
997 rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
1015 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp() argument
1022 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); in tdp_mmu_link_sp()
1026 tdp_mmu_iter_set_spte(kvm, iter, spte); in tdp_mmu_link_sp()
1034 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1045 struct tdp_iter iter; in kvm_tdp_mmu_map() local
1055 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { in kvm_tdp_mmu_map()
1059 disallowed_hugepage_adjust(fault, iter.old_spte, iter.level); in kvm_tdp_mmu_map()
1065 if (is_removed_spte(iter.old_spte)) in kvm_tdp_mmu_map()
1068 if (iter.level == fault->goal_level) in kvm_tdp_mmu_map()
1072 if (is_shadow_present_pte(iter.old_spte) && in kvm_tdp_mmu_map()
1073 !is_large_pte(iter.old_spte)) in kvm_tdp_mmu_map()
1081 tdp_mmu_init_child_sp(sp, &iter); in kvm_tdp_mmu_map()
1085 if (is_shadow_present_pte(iter.old_spte)) in kvm_tdp_mmu_map()
1086 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1088 r = tdp_mmu_link_sp(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1100 fault->req_level >= iter.level) { in kvm_tdp_mmu_map()
1112 WARN_ON_ONCE(iter.level == fault->goal_level); in kvm_tdp_mmu_map()
1116 ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter); in kvm_tdp_mmu_map()
1135 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1143 struct tdp_iter iter; in kvm_tdp_mmu_handle_gfn() local
1153 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) in kvm_tdp_mmu_handle_gfn()
1154 ret |= handler(kvm, &iter, range); in kvm_tdp_mmu_handle_gfn()
1170 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range() argument
1176 if (!is_accessed_spte(iter->old_spte)) in age_gfn_range()
1179 if (spte_ad_enabled(iter->old_spte)) { in age_gfn_range()
1180 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep, in age_gfn_range()
1181 iter->old_spte, in age_gfn_range()
1183 iter->level); in age_gfn_range()
1184 new_spte = iter->old_spte & ~shadow_accessed_mask; in age_gfn_range()
1190 if (is_writable_pte(iter->old_spte)) in age_gfn_range()
1191 kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte)); in age_gfn_range()
1193 new_spte = mark_spte_for_access_track(iter->old_spte); in age_gfn_range()
1194 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep, in age_gfn_range()
1195 iter->old_spte, new_spte, in age_gfn_range()
1196 iter->level); in age_gfn_range()
1199 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level, in age_gfn_range()
1200 iter->old_spte, new_spte); in age_gfn_range()
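
Note: age_gfn_range() above handles two cases; the common one (A/D bits enabled) clears the accessed bit with a single atomic AND so no write lock is needed, and reports whether the page had been touched. A hedged sketch of that case only, with made-up names and bit layout:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_ACCESSED (1ull << 5)

    static _Atomic uint64_t fake_spte = FAKE_ACCESSED | 0x1000;

    static bool fake_age(void)
    {
        /* atomic_fetch_and() returns the value before the AND; that plays
         * the role of iter->old_spte in the listing above.
         */
        uint64_t old = atomic_fetch_and(&fake_spte, ~FAKE_ACCESSED);

        return old & FAKE_ACCESSED;     /* "young" if it had been accessed */
    }

    int main(void)
    {
        printf("was accessed: %d\n", fake_age());   /* 1 */
        printf("was accessed: %d\n", fake_age());   /* 0 on the second pass */
        return 0;
    }
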
1209 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn() argument
1212 return is_accessed_spte(iter->old_spte); in test_age_gfn()
1220 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, in set_spte_gfn() argument
1228 if (iter->level != PG_LEVEL_4K || in set_spte_gfn()
1229 !is_shadow_present_pte(iter->old_spte)) in set_spte_gfn()
1238 tdp_mmu_iter_set_spte(kvm, iter, 0); in set_spte_gfn()
1241 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, in set_spte_gfn()
1244 tdp_mmu_iter_set_spte(kvm, iter, new_spte); in set_spte_gfn()
1274 struct tdp_iter iter; in wrprot_gfn_range() local
1282 for_each_tdp_pte_min_level(iter, root, min_level, start, end) { in wrprot_gfn_range()
1284 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in wrprot_gfn_range()
1287 if (!is_shadow_present_pte(iter.old_spte) || in wrprot_gfn_range()
1288 !is_last_spte(iter.old_spte, iter.level) || in wrprot_gfn_range()
1289 !(iter.old_spte & PT_WRITABLE_MASK)) in wrprot_gfn_range()
1292 new_spte = iter.old_spte & ~PT_WRITABLE_MASK; in wrprot_gfn_range()
1294 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in wrprot_gfn_range()
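
Note: wrprot_gfn_range() above write-protects present leaf SPTEs by dropping PT_WRITABLE_MASK from the old value and re-installing it through the atomic helper. A minimal user-space sketch of that "clear one bit under cmpxchg" step, using an illustrative FAKE_WRITABLE bit rather than the real SPTE layout:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_WRITABLE (1ull << 1)

    static _Atomic uint64_t fake_spte = FAKE_WRITABLE | 0x1000;

    static void fake_wrprot(void)
    {
        uint64_t old = atomic_load(&fake_spte);

        /* On failure `old` is refreshed with the current value, mirroring
         * how a failed tdp_mmu_set_spte_atomic() refreshes iter->old_spte
         * before the walk retries.
         */
        while (!atomic_compare_exchange_weak(&fake_spte, &old,
                                             old & ~FAKE_WRITABLE))
            ;
    }

    int main(void)
    {
        fake_wrprot();
        printf("spte after wrprot: %#llx\n",
               (unsigned long long)atomic_load(&fake_spte));
        return 0;
    }
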
1344 struct tdp_iter *iter, in tdp_mmu_alloc_sp_for_split() argument
1369 iter->yielded = true; in tdp_mmu_alloc_sp_for_split()
1383 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page() argument
1386 const u64 huge_spte = iter->old_spte; in tdp_mmu_split_huge_page()
1387 const int level = iter->level; in tdp_mmu_split_huge_page()
1405 ret = tdp_mmu_link_sp(kvm, iter, sp, shared); in tdp_mmu_split_huge_page()
1417 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret); in tdp_mmu_split_huge_page()
1427 struct tdp_iter iter; in tdp_mmu_split_huge_pages_root() local
1443 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { in tdp_mmu_split_huge_pages_root()
1445 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in tdp_mmu_split_huge_pages_root()
1448 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte)) in tdp_mmu_split_huge_pages_root()
1452 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); in tdp_mmu_split_huge_pages_root()
1455 trace_kvm_mmu_split_huge_page(iter.gfn, in tdp_mmu_split_huge_pages_root()
1456 iter.old_spte, in tdp_mmu_split_huge_pages_root()
1457 iter.level, ret); in tdp_mmu_split_huge_pages_root()
1461 if (iter.yielded) in tdp_mmu_split_huge_pages_root()
1465 tdp_mmu_init_child_sp(sp, &iter); in tdp_mmu_split_huge_pages_root()
1467 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) in tdp_mmu_split_huge_pages_root()
1520 struct tdp_iter iter; in clear_dirty_gfn_range() local
1525 tdp_root_for_each_leaf_pte(iter, root, start, end) { in clear_dirty_gfn_range()
1527 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in clear_dirty_gfn_range()
1530 if (!is_shadow_present_pte(iter.old_spte)) in clear_dirty_gfn_range()
1534 spte_ad_need_write_protect(iter.old_spte)); in clear_dirty_gfn_range()
1536 if (!(iter.old_spte & dbit)) in clear_dirty_gfn_range()
1539 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit)) in clear_dirty_gfn_range()
1583 struct tdp_iter iter; in clear_dirty_pt_masked() local
1589 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
1595 spte_ad_need_write_protect(iter.old_spte)); in clear_dirty_pt_masked()
1597 if (iter.level > PG_LEVEL_4K || in clear_dirty_pt_masked()
1598 !(mask & (1UL << (iter.gfn - gfn)))) in clear_dirty_pt_masked()
1601 mask &= ~(1UL << (iter.gfn - gfn)); in clear_dirty_pt_masked()
1603 if (!(iter.old_spte & dbit)) in clear_dirty_pt_masked()
1606 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep, in clear_dirty_pt_masked()
1607 iter.old_spte, dbit, in clear_dirty_pt_masked()
1608 iter.level); in clear_dirty_pt_masked()
1610 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level, in clear_dirty_pt_masked()
1611 iter.old_spte, in clear_dirty_pt_masked()
1612 iter.old_spte & ~dbit); in clear_dirty_pt_masked()
1613 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte)); in clear_dirty_pt_masked()
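
Note: in clear_dirty_pt_masked() above, mask is a bitmap selecting which 4K pages inside a 64-GFN window still need their dirty (or write-protect) bit cleared; each handled page clears its bit so the walk can stop as soon as the mask is empty. A small user-space restatement of that bookkeeping (the printf stands in for the actual bit clearing):

    #include <stdint.h>
    #include <stdio.h>

    static void fake_clear_dirty_masked(unsigned long base_gfn, uint64_t mask)
    {
        /* The loop ends as soon as every selected page has been handled. */
        for (unsigned long gfn = base_gfn; mask; gfn++) {
            if (!(mask & (1ull << (gfn - base_gfn))))
                continue;                        /* page not selected   */

            mask &= ~(1ull << (gfn - base_gfn)); /* mark bit as handled */
            printf("clear dirty bit for gfn %#lx\n", gfn);
        }
    }

    int main(void)
    {
        fake_clear_dirty_masked(0x100, 0x5);     /* pages 0 and 2 of the window */
        return 0;
    }
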
1643 struct tdp_iter iter; in zap_collapsible_spte_range() local
1648 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) { in zap_collapsible_spte_range()
1650 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in zap_collapsible_spte_range()
1653 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL || in zap_collapsible_spte_range()
1654 !is_shadow_present_pte(iter.old_spte)) in zap_collapsible_spte_range()
1662 if (is_last_spte(iter.old_spte, iter.level)) in zap_collapsible_spte_range()
1672 if (iter.gfn < start || iter.gfn >= end) in zap_collapsible_spte_range()
1676 iter.gfn, PG_LEVEL_NUM); in zap_collapsible_spte_range()
1677 if (max_mapping_level < iter.level) in zap_collapsible_spte_range()
1681 if (tdp_mmu_zap_spte_atomic(kvm, &iter)) in zap_collapsible_spte_range()
1711 struct tdp_iter iter; in write_protect_gfn() local
1719 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { in write_protect_gfn()
1720 if (!is_shadow_present_pte(iter.old_spte) || in write_protect_gfn()
1721 !is_last_spte(iter.old_spte, iter.level)) in write_protect_gfn()
1724 new_spte = iter.old_spte & in write_protect_gfn()
1727 if (new_spte == iter.old_spte) in write_protect_gfn()
1730 tdp_mmu_iter_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1767 struct tdp_iter iter; in kvm_tdp_mmu_get_walk() local
1774 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_get_walk()
1775 leaf = iter.level; in kvm_tdp_mmu_get_walk()
1776 sptes[leaf] = iter.old_spte; in kvm_tdp_mmu_get_walk()
1796 struct tdp_iter iter; in kvm_tdp_mmu_fast_pf_get_last_sptep() local
1801 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_fast_pf_get_last_sptep()
1802 *spte = iter.old_spte; in kvm_tdp_mmu_fast_pf_get_last_sptep()
1803 sptep = iter.sptep; in kvm_tdp_mmu_fast_pf_get_last_sptep()
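
Note: the last two groups above (kvm_tdp_mmu_get_walk() and kvm_tdp_mmu_fast_pf_get_last_sptep()) walk the tables for a single GFN, recording the SPTE seen at each level and the lowest level reached. A toy two-level model of that walk, with made-up indexing and a fake present bit:

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_PRESENT (1ull << 0)

    static uint64_t level2[4];      /* "root" level */
    static uint64_t level1[4][4];   /* leaf level   */

    static int fake_get_walk(unsigned gfn, uint64_t sptes[3])
    {
        unsigned i2 = (gfn >> 2) & 3, i1 = gfn & 3;
        int leaf = 2;

        sptes[2] = level2[i2];              /* like sptes[leaf] = iter.old_spte */
        if (!(level2[i2] & FAKE_PRESENT))
            return leaf;                    /* walk stops at a hole */

        leaf = 1;
        sptes[1] = level1[i2][i1];
        return leaf;
    }

    int main(void)
    {
        uint64_t sptes[3] = { 0 };

        level2[1] = FAKE_PRESENT;
        level1[1][2] = FAKE_PRESENT | 0x5000;

        int leaf = fake_get_walk(6, sptes); /* gfn 6 -> i2 = 1, i1 = 2 */
        printf("leaf level %d, spte %#llx\n", leaf,
               (unsigned long long)sptes[leaf]);
        return 0;
    }
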