
Searched refs:mmu_lock (Results 1 – 25 of 30) sorted by relevance

/linux-6.1.9/virt/kvm/
kvm_mm.h
14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
17 #define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock)
18 #define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock)
20 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
21 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
22 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
23 #define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
24 #define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
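
Note: the two groups of macros above are build-time alternatives in kvm_mm.h. Architectures whose mmu_lock is an rwlock (as the write_lock()/read_lock() call sites under arch/x86 and arch/arm64 below suggest) get the rwlock-backed helpers; everyone else falls back to a plain spinlock, so the "READ" variants are still exclusive there. A sketch of how listing lines 14-24 presumably fit together, assuming the guard symbol is KVM_HAVE_MMU_RWLOCK:

	#ifdef KVM_HAVE_MMU_RWLOCK
	#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
	#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
	#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
	#define KVM_MMU_READ_LOCK(kvm)		read_lock(&(kvm)->mmu_lock)
	#define KVM_MMU_READ_UNLOCK(kvm)	read_unlock(&(kvm)->mmu_lock)
	#else
	/* spinlock fallback: "read" locking is exclusive too */
	#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
	#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
	#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
	#define KVM_MMU_READ_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
	#define KVM_MMU_READ_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
	#endif
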
/linux-6.1.9/arch/riscv/kvm/
mmu.c
301 cond_resched_lock(&kvm->mmu_lock); in gstage_unmap_range()
340 spin_lock(&kvm->mmu_lock); in gstage_wp_memory_region()
342 spin_unlock(&kvm->mmu_lock); in gstage_wp_memory_region()
372 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
374 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
388 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
390 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
434 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
436 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
540 spin_lock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
[all …]
/linux-6.1.9/arch/powerpc/kvm/
book3s_hv_nested.c
745 spin_lock(&kvm->mmu_lock); in kvmhv_remove_nested()
751 spin_unlock(&kvm->mmu_lock); in kvmhv_remove_nested()
770 spin_lock(&kvm->mmu_lock); in kvmhv_release_all_nested()
780 spin_unlock(&kvm->mmu_lock); in kvmhv_release_all_nested()
797 spin_lock(&kvm->mmu_lock); in kvmhv_flush_nested()
799 spin_unlock(&kvm->mmu_lock); in kvmhv_flush_nested()
814 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
818 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
832 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
841 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
[all …]
book3s_mmu_hpte.c
63 spin_lock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
92 spin_unlock(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_cache_map()
110 spin_lock(&vcpu3s->mmu_lock); in invalidate_pte()
114 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
127 spin_unlock(&vcpu3s->mmu_lock); in invalidate_pte()
369 spin_lock_init(&vcpu3s->mmu_lock); in kvmppc_mmu_hpte_init()
book3s_64_mmu_radix.c
641 spin_lock(&kvm->mmu_lock); in kvmppc_create_pte()
777 spin_unlock(&kvm->mmu_lock); in kvmppc_create_pte()
865 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
870 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
1000 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1004 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1110 spin_lock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1122 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1137 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1182 spin_lock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
[all …]
book3s_hv_rm_mmu.c
248 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
263 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
277 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_enter()
938 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
950 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_zero()
966 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
981 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); in kvmppc_do_h_page_init_copy()
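
Note: the .rlock.raw_lock accesses above are the odd ones out. book3s_hv_rm_mmu.c runs in hypervisor real mode, where the regular spin_lock() wrapper is avoided (it drags in lockdep and preemption bookkeeping), so the code locks the arch_spinlock_t embedded inside mmu_lock directly. A minimal sketch of the pattern, with the HPTE work elided:

	/* real-mode path: take the raw architecture lock underneath mmu_lock */
	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
	/* ... install, zero or copy the guest HPTE while the MMU state is stable ... */
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
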
book3s_64_mmu_host.c
153 spin_lock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
205 spin_unlock(&kvm->mmu_lock); in kvmppc_mmu_map_page()
book3s_64_mmu_hv.c
617 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
622 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
751 spin_lock(&kvm->mmu_lock); in kvmppc_rmap_reset()
758 spin_unlock(&kvm->mmu_lock); in kvmppc_rmap_reset()
1391 spin_lock(&kvm->mmu_lock); in resize_hpt_pivot()
1398 spin_unlock(&kvm->mmu_lock); in resize_hpt_pivot()
e500_mmu_host.c
462 spin_lock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
502 spin_unlock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
/linux-6.1.9/arch/arm64/kvm/
mmu.c
68 cond_resched_rwlock_write(&kvm->mmu_lock); in stage2_apply_range()
217 lockdep_assert_held_write(&kvm->mmu_lock); in __unmap_stage2_range()
251 write_lock(&kvm->mmu_lock); in stage2_flush_vm()
257 write_unlock(&kvm->mmu_lock); in stage2_flush_vm()
779 write_lock(&kvm->mmu_lock); in stage2_unmap_vm()
785 write_unlock(&kvm->mmu_lock); in stage2_unmap_vm()
795 write_lock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
802 write_unlock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
842 write_lock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
845 write_unlock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
[all …]
/linux-6.1.9/arch/x86/kvm/mmu/
tdp_mmu.c
42 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
44 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
99 read_lock(&kvm->mmu_lock); in tdp_mmu_zap_root_work()
120 read_unlock(&kvm->mmu_lock); in tdp_mmu_zap_root_work()
317 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
401 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_unlink_sp()
656 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_set_spte_atomic()
730 lockdep_assert_held_write(&kvm->mmu_lock); in __tdp_mmu_set_spte()
822 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
829 cond_resched_rwlock_read(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
[all …]
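
Note: lines 42 and 44 pair lockdep_assert_held_read() with lockdep_assert_held_write() inside kvm_lockdep_assert_mmu_lock_held(), i.e. the TDP MMU asserts mmu_lock in whichever mode the caller claims to hold it, and lines 822-829 show the matching pattern of dropping and retaking the rwlock when it is contended. A plausible shape for the assertion helper (sketch only, not the exact kernel signature):

	static inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, bool shared)
	{
		if (shared)
			lockdep_assert_held_read(&kvm->mmu_lock);	/* held for read */
		else
			lockdep_assert_held_write(&kvm->mmu_lock);	/* held for write */
	}
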
page_track.c
226 write_lock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
228 write_unlock(&kvm->mmu_lock); in kvm_page_track_register_notifier()
244 write_lock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
246 write_unlock(&kvm->mmu_lock); in kvm_page_track_unregister_notifier()
mmu.c
2000 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { in mmu_sync_children()
2007 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2446 lockdep_assert_held_write(&kvm->mmu_lock); in __kvm_mmu_prepare_zap_page()
2613 write_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2624 write_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2635 write_lock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2643 write_unlock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
3462 write_lock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3487 write_unlock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3556 write_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
[all …]
paging_tmpl.h
872 write_lock(&vcpu->kvm->mmu_lock); in FNAME()
883 write_unlock(&vcpu->kvm->mmu_lock); in FNAME()
921 write_lock(&vcpu->kvm->mmu_lock); in FNAME()
956 write_unlock(&vcpu->kvm->mmu_lock); in FNAME()
/linux-6.1.9/drivers/misc/habanalabs/common/
command_buffer.c
47 mutex_lock(&hdev->mmu_lock); in cb_map_mem()
54 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
60 mutex_unlock(&hdev->mmu_lock); in cb_map_mem()
69 mutex_lock(&hdev->mmu_lock); in cb_unmap_mem()
72 mutex_unlock(&hdev->mmu_lock); in cb_unmap_mem()
memory.c
1213 mutex_lock(&hdev->mmu_lock); in map_device_va()
1218 mutex_unlock(&hdev->mmu_lock); in map_device_va()
1224 mutex_unlock(&hdev->mmu_lock); in map_device_va()
1365 mutex_lock(&hdev->mmu_lock); in unmap_device_va()
1378 mutex_unlock(&hdev->mmu_lock); in unmap_device_va()
2773 mutex_lock(&hdev->mmu_lock); in hl_vm_ctx_fini()
2779 mutex_unlock(&hdev->mmu_lock); in hl_vm_ctx_fini()
/linux-6.1.9/arch/mips/kvm/
mmu.c
522 spin_lock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
557 spin_unlock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
639 spin_lock(&kvm->mmu_lock); in kvm_mips_map_page()
647 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
677 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
mips.c
208 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
213 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
243 spin_lock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
249 spin_unlock(&kvm->mmu_lock); in kvm_arch_commit_memory_region()
/linux-6.1.9/arch/x86/kvm/
debugfs.c
110 write_lock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
130 write_unlock(&kvm->mmu_lock); in kvm_mmu_rmaps_stat_show()
/linux-6.1.9/drivers/misc/habanalabs/common/mmu/
mmu.c
50 mutex_init(&hdev->mmu_lock); in hl_mmu_init()
94 mutex_destroy(&hdev->mmu_lock); in hl_mmu_fini()
612 mutex_lock(&hdev->mmu_lock); in hl_mmu_get_tlb_info()
614 mutex_unlock(&hdev->mmu_lock); in hl_mmu_get_tlb_info()
709 mutex_lock(&hdev->mmu_lock); in hl_mmu_prefetch_work_function()
713 mutex_unlock(&hdev->mmu_lock); in hl_mmu_prefetch_work_function()
/linux-6.1.9/Documentation/virt/kvm/
locking.rst
34 - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
35 kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
36 cannot be taken without already holding kvm->arch.mmu_lock (typically with
245 ``kvm->mmu_lock``
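
Note: the locking.rst lines above state the ordering rule: kvm->arch.tdp_mmu_pages_lock and kvm->arch.mmu_unsync_pages_lock nest strictly inside mmu_lock and are only taken with mmu_lock already held, typically for read on the TDP MMU side. Illustrative nesting under that rule (hypothetical call site, not copied from the tree):

	read_lock(&kvm->mmu_lock);			/* outer lock, shared mode */
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);	/* inner spinlock, protects page lists */
	/* ... link or unlink a TDP MMU shadow page ... */
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	read_unlock(&kvm->mmu_lock);
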
/linux-6.1.9/arch/powerpc/include/asm/
kvm_book3s_64.h
654 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_secondary_pte()
666 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_host_pte()
kvm_book3s.h
134 spinlock_t mmu_lock; member
/linux-6.1.9/drivers/gpu/drm/i915/gvt/
kvmgt.c
1564 write_lock(&kvm->mmu_lock); in intel_gvt_page_track_add()
1573 write_unlock(&kvm->mmu_lock); in intel_gvt_page_track_add()
1594 write_lock(&kvm->mmu_lock); in intel_gvt_page_track_remove()
1603 write_unlock(&kvm->mmu_lock); in intel_gvt_page_track_remove()
1629 write_lock(&kvm->mmu_lock); in kvmgt_page_track_flush_slot()
1638 write_unlock(&kvm->mmu_lock); in kvmgt_page_track_flush_slot()
/linux-6.1.9/include/linux/
kvm_host.h
691 rwlock_t mmu_lock; member
693 spinlock_t mmu_lock;
1948 lockdep_assert_held(&kvm->mmu_lock); in mmu_invalidate_retry_hva()
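
Note: the member hits at lines 691 and 693 are the struct kvm side of the kvm_mm.h split shown at the top of these results: the same build-time switch decides whether mmu_lock is declared as an rwlock or a spinlock. Presumably (again assuming the KVM_HAVE_MMU_RWLOCK guard):

	struct kvm {
	#ifdef KVM_HAVE_MMU_RWLOCK
		rwlock_t mmu_lock;
	#else
		spinlock_t mmu_lock;
	#endif
		/* ... remaining members ... */
	};

Line 1948's lockdep_assert_held(&kvm->mmu_lock) works with either declaration, since lockdep tracks both lock types.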
