Lines matching refs: hyp_vm (from the Linux kernel's pKVM nVHE hypervisor, arch/arm64/kvm/hyp/nvhe/pkvm.c; the leading numbers are source-file line numbers)
263 struct pkvm_hyp_vm *hyp_vm; in pkvm_load_hyp_vcpu() local
266 hyp_vm = get_vm_by_handle(handle); in pkvm_load_hyp_vcpu()
267 if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx) in pkvm_load_hyp_vcpu()
270 hyp_vcpu = hyp_vm->vcpus[vcpu_idx]; in pkvm_load_hyp_vcpu()
271 hyp_page_ref_inc(hyp_virt_to_page(hyp_vm)); in pkvm_load_hyp_vcpu()
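The pkvm_load_hyp_vcpu() hits above trace a lookup-and-pin pattern: resolve the handle, bounds-check the vCPU index, then take a reference on the page backing the hyp VM so it cannot be torn down while a vCPU is loaded. A sketch of the surrounding function, assuming nvhe-internal definitions and a vm_table_lock critical section to fill the gaps between the referenced lines:

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);		/* assumed lock around the table walk */
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];

	/* Pin the VM so it cannot be torn down while the vCPU is loaded. */
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}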
279 struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu); in pkvm_put_hyp_vcpu() local
282 hyp_page_ref_dec(hyp_virt_to_page(hyp_vm)); in pkvm_put_hyp_vcpu()
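The put side drops that pin. A minimal sketch under the same assumptions:

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}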
301 static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, in init_pkvm_hyp_vm() argument
304 hyp_vm->host_kvm = host_kvm; in init_pkvm_hyp_vm()
305 hyp_vm->kvm.created_vcpus = nr_vcpus; in init_pkvm_hyp_vm()
306 hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr; in init_pkvm_hyp_vm()
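The three init_pkvm_hyp_vm() references are consecutive, so the helper is plausibly just these assignments: record the host's struct kvm, copy the vCPU count, and inherit the host's stage-2 VTCR:

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}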
310 struct pkvm_hyp_vm *hyp_vm, in init_pkvm_hyp_vcpu() argument
326 hyp_vcpu->vcpu.kvm = &hyp_vm->kvm; in init_pkvm_hyp_vcpu()
330 hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu; in init_pkvm_hyp_vcpu()
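Only fragments of init_pkvm_hyp_vcpu() match, since most of its body does not mention hyp_vm. A sketch of how the two referenced assignments wire a hyp vCPU into its VM; the host-vCPU pinning and the rest of the per-vCPU setup are elided assumptions:

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	/* ... pin and validate host_vcpu (elided, may set ret) ... */

	hyp_vcpu->host_vcpu = host_vcpu;

	/* Point the hyp vCPU at the hyp copy of the VM ... */
	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	/* ... and at the guest's stage-2 MMU rather than the host's. */
	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;

	/* ... remaining register/feature initialization (elided) ... */
	return ret;
}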
357 struct pkvm_hyp_vm *hyp_vm) in insert_vm_table_entry() argument
359 struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu; in insert_vm_table_entry()
376 hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx); in insert_vm_table_entry()
381 mmu->arch = &hyp_vm->kvm.arch; in insert_vm_table_entry()
382 mmu->pgt = &hyp_vm->pgt; in insert_vm_table_entry()
384 vm_table[idx] = hyp_vm; in insert_vm_table_entry()
385 return hyp_vm->kvm.arch.pkvm.handle; in insert_vm_table_entry()
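insert_vm_table_entry() turns a free table index into the VM's handle and finishes wiring the guest's stage-2 MMU before publishing the VM in vm_table. A sketch; find_free_vm_table_entry() and the VMID assignment are assumptions filled in from the gaps in the line numbering:

static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	idx = find_free_vm_table_entry(host_kvm);	/* assumed helper */
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* Plausibly VMID 0 stays reserved for the host, hence idx + 1. */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}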
469 struct pkvm_hyp_vm *hyp_vm = NULL; in __pkvm_init_vm() local
490 hyp_vm = map_donated_memory(vm_hva, vm_size); in __pkvm_init_vm()
491 if (!hyp_vm) in __pkvm_init_vm()
498 init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus); in __pkvm_init_vm()
501 ret = insert_vm_table_entry(host_kvm, hyp_vm); in __pkvm_init_vm()
505 ret = kvm_guest_prepare_stage2(hyp_vm, pgd); in __pkvm_init_vm()
510 return hyp_vm->kvm.arch.pkvm.handle; in __pkvm_init_vm()
513 remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle); in __pkvm_init_vm()
517 unmap_donated_memory(hyp_vm, vm_size); in __pkvm_init_vm()
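The __pkvm_init_vm() hits outline the full creation path: map the memory the host donated for the VM, initialize it, publish it in the table under vm_table_lock, prepare the guest stage-2, and unwind in reverse order on failure. A condensed sketch; the pinning, size computations, pgd handling, and label names are assumptions around the referenced lines:

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	/* ... pin host_kvm, read nr_vcpus, compute vm_size/pgd_size ... */

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);	/* assumed helper */
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);	/* assumed to tolerate NULL */
	/* ... unmap pgd, unpin host_kvm (elided) ... */
	return ret;
}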
539 struct pkvm_hyp_vm *hyp_vm; in __pkvm_init_vcpu() local
549 hyp_vm = get_vm_by_handle(handle); in __pkvm_init_vcpu()
550 if (!hyp_vm) { in __pkvm_init_vcpu()
555 idx = hyp_vm->nr_vcpus; in __pkvm_init_vcpu()
556 if (idx >= hyp_vm->kvm.created_vcpus) { in __pkvm_init_vcpu()
561 ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx); in __pkvm_init_vcpu()
565 hyp_vm->vcpus[idx] = hyp_vcpu; in __pkvm_init_vcpu()
566 hyp_vm->nr_vcpus++; in __pkvm_init_vcpu()
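__pkvm_init_vcpu() appends one vCPU to an existing VM: look the VM up by handle, refuse to exceed created_vcpus, initialize the donated hyp vCPU, then publish it. A sketch with assumed error codes and locking:

int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
	return ret;
}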
591 struct pkvm_hyp_vm *hyp_vm; in __pkvm_teardown_vm() local
598 hyp_vm = get_vm_by_handle(handle); in __pkvm_teardown_vm()
599 if (!hyp_vm) { in __pkvm_teardown_vm()
604 if (WARN_ON(hyp_page_count(hyp_vm))) { in __pkvm_teardown_vm()
609 host_kvm = hyp_vm->host_kvm; in __pkvm_teardown_vm()
612 __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu); in __pkvm_teardown_vm()
618 reclaim_guest_pages(hyp_vm, mc); in __pkvm_teardown_vm()
619 unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus); in __pkvm_teardown_vm()
622 for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) { in __pkvm_teardown_vm()
623 struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx]; in __pkvm_teardown_vm()
628 vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus); in __pkvm_teardown_vm()
629 teardown_donated_memory(mc, hyp_vm, vm_size); in __pkvm_teardown_vm()
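__pkvm_teardown_vm() reverses all of the above: it refuses to tear down a VM that still has a loaded vCPU (the hyp_page_count() check mirrors the pin taken in pkvm_load_hyp_vcpu()), flushes the VMID, unpublishes the VM, then returns the guest's pages and the donated metadata to the host. A sketch; the memcache source and error codes are assumptions:

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;		/* a loaded vCPU still holds a reference */
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated. */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages, then hand the metadata back to the host. */
	mc = &host_kvm->arch.pkvm.teardown_mc;	/* assumed memcache source */
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);	/* assumed to balance the init-time pin */
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}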