/linux-6.1.9/drivers/virtio/

virtio_mem.c:
    276  static void virtio_mem_retry(struct virtio_mem *vm);
    277  static int virtio_mem_create_resource(struct virtio_mem *vm);
    278  static void virtio_mem_delete_resource(struct virtio_mem *vm);
    284  static int register_virtio_mem_device(struct virtio_mem *vm)
    293  list_add_rcu(&vm->next, &virtio_mem_devices);  /* in register_virtio_mem_device() */
    303  static void unregister_virtio_mem_device(struct virtio_mem *vm)
    307  list_del_rcu(&vm->next);  /* in unregister_virtio_mem_device() */
    334  static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
    337  return addr / vm->bbm.bb_size;  /* in virtio_mem_phys_to_bb_id() */
    343  static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/lib/aarch64/

processor.c:
    19  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    21  return (v + vm->page_size) & ~(vm->page_size - 1);  /* in page_align() */
    24  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
    26  unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;  /* in pgd_index() */
    27  uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;  /* in pgd_index() */
    32  static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
    34  unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;  /* in pud_index() */
    35  uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;  /* in pud_index() */
    37  TEST_ASSERT(vm->pgtable_levels == 4,  /* in pud_index() */
    38  "Mode %d does not have 4 page table levels", vm->mode);
    [all …]

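Two things worth seeing in this listing: page_align() at line 21 masks down after adding a full page, so it rounds v up to a page boundary (and pushes an already-aligned v to the next page); and pgd_index()/pud_index() reduce a guest VA to a table index with plain shift-and-mask arithmetic. A minimal standalone sketch of that index math, assuming 4 KiB pages (page_shift = 12), four translation levels, and 48 VA bits (assumed values for illustration, not taken from the listing):

#include <stdint.h>
#include <stdio.h>

/* Assumed geometry: 4 KiB pages, 4 levels, 48-bit VAs. */
#define PAGE_SHIFT     12u
#define PGTABLE_LEVELS 4u
#define VA_BITS        48u

/* Each level resolves (PAGE_SHIFT - 3) bits: a page of 8-byte entries. */
static uint64_t pgd_index(uint64_t gva)
{
	unsigned int shift = (PGTABLE_LEVELS - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT;
	uint64_t mask = (1ULL << (VA_BITS - shift)) - 1;

	return (gva >> shift) & mask;
}

int main(void)
{
	/* shift = 3 * 9 + 12 = 39, mask = (1 << 9) - 1 = 0x1ff */
	uint64_t gva = (5ULL << 39) | 0x1234;  /* lands in top-level slot 5 */

	printf("pgd_index = %llu\n", (unsigned long long)pgd_index(gva));
	return 0;
}
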
/linux-6.1.9/drivers/gpu/drm/i915/gt/

intel_gtt.c:
    32  struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
    48  obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,  /* in alloc_pt_lmem() */
    49  vm->lmem_pt_obj_flags);
    56  obj->base.resv = i915_vm_resv_get(vm);  /* in alloc_pt_lmem() */
    57  obj->shares_resv_from = vm;  /* in alloc_pt_lmem() */
    63  struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
    67  if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))  /* in alloc_pt_dma() */
    68  i915_gem_shrink_all(vm->i915);
    70  obj = i915_gem_object_create_internal(vm->i915, sz);  /* in alloc_pt_dma() */
    77  obj->base.resv = i915_vm_resv_get(vm);  /* in alloc_pt_dma() */
    [all …]

intel_ggtt.c:
    29  static inline bool suspend_retains_ptes(struct i915_address_space *vm)
    31  return GRAPHICS_VER(vm->i915) >= 8 &&  /* in suspend_retains_ptes() */
    32  !HAS_LMEM(vm->i915) &&
    33  vm->is_ggtt;
    57  struct drm_i915_private *i915 = ggtt->vm.i915;  /* in ggtt_init_hw() */
    59  i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);  /* in ggtt_init_hw() */
    61  ggtt->vm.is_ggtt = true;  /* in ggtt_init_hw() */
    64  ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);  /* in ggtt_init_hw() */
    67  ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;  /* in ggtt_init_hw() */
    73  ggtt->vm.cleanup(&ggtt->vm);  /* in ggtt_init_hw() */
    [all …]

intel_gtt.h:
    64  #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
    215  void (*bind_vma)(struct i915_address_space *vm,
    224  void (*unbind_vma)(struct i915_address_space *vm,
    285  (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
    287  (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
    295  void (*allocate_va_range)(struct i915_address_space *vm,
    298  void (*clear_range)(struct i915_address_space *vm,
    300  void (*insert_page)(struct i915_address_space *vm,
    305  void (*insert_entries)(struct i915_address_space *vm,
    309  void (*raw_insert_page)(struct i915_address_space *vm,
    [all …]

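The hits at lines 215-309 are the function-pointer hooks through which each address-space flavor (GGTT, gen6/gen8 ppGTT, DPT, and the selftest mock further below) supplies its own page-table operations. A standalone sketch of that ops-table dispatch pattern, with hypothetical cut-down names rather than the i915 structures themselves:

#include <stdint.h>
#include <stdio.h>

struct address_space;

/* Hypothetical analogue of the vm hooks seen above. */
struct address_space_ops {
	void (*insert_page)(struct address_space *as, uint64_t addr, uint64_t offset);
	void (*clear_range)(struct address_space *as, uint64_t start, uint64_t length);
};

struct address_space {
	const struct address_space_ops *ops;
	uint64_t total;
};

/* One backend; a mock (cf. mock_gtt.c below) would stub these instead. */
static void real_insert_page(struct address_space *as, uint64_t addr, uint64_t offset)
{
	(void)as;
	printf("map 0x%llx at 0x%llx\n", (unsigned long long)addr,
	       (unsigned long long)offset);
}

static void real_clear_range(struct address_space *as, uint64_t start, uint64_t length)
{
	(void)as;
	printf("clear [0x%llx, +0x%llx)\n", (unsigned long long)start,
	       (unsigned long long)length);
}

static const struct address_space_ops real_ops = {
	.insert_page = real_insert_page,
	.clear_range = real_clear_range,
};

int main(void)
{
	struct address_space as = { .ops = &real_ops, .total = 1ULL << 32 };

	as.ops->insert_page(&as, 0x1000, 0x0);  /* dispatch through the table */
	as.ops->clear_range(&as, 0x0, as.total);
	return 0;
}
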
gen8_ppgtt.c:
    60  struct drm_i915_private *i915 = ppgtt->vm.i915;  /* in gen8_ppgtt_notify_vgt() */
    61  struct intel_uncore *uncore = ppgtt->vm.gt->uncore;  /* in gen8_ppgtt_notify_vgt() */
    72  if (i915_vm_is_4lvl(&ppgtt->vm)) {  /* in gen8_ppgtt_notify_vgt() */
    150  static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
    152  unsigned int shift = __gen8_pte_shift(vm->top);  /* in gen8_pd_top_count() */
    154  return (vm->total + (1ull << shift) - 1) >> shift;  /* in gen8_pd_top_count() */
    158  gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
    160  struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);  /* in gen8_pdp_for_page_index() */
    162  if (vm->top == 2)  /* in gen8_pdp_for_page_index() */
    165  return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));  /* in gen8_pdp_for_page_index() */
    [all …]

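gen8_pd_top_count() at line 154 is the usual round-up division written with shifts: add (1 << shift) - 1 before shifting so a partially covered top-level entry still counts. A standalone sketch of the same arithmetic (the shift and size values are assumed for illustration):

#include <stdint.h>
#include <stdio.h>

/* Round total up to whole top-level entries: ceil(total / 2^shift). */
static unsigned int pd_top_count(uint64_t total, unsigned int shift)
{
	return (total + (1ULL << shift) - 1) >> shift;
}

int main(void)
{
	/* With shift = 39 (512 GiB per top-level entry), a 48-bit,
	 * 256 TiB address space needs exactly 512 entries... */
	printf("%u\n", pd_top_count(1ULL << 48, 39));
	/* ...and one byte more rounds up to 513. */
	printf("%u\n", pd_top_count((1ULL << 48) + 1, 39));
	return 0;
}
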
gen6_ppgtt.c:
    23  dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);  /* in gen6_write_pde() */
    74  static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
    77  struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));  /* in gen6_ppgtt_clear_range() */
    79  const gen6_pte_t scratch_pte = vm->scratch[0]->encode;  /* in gen6_ppgtt_clear_range() */
    110  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
    115  struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);  /* in gen6_ppgtt_insert_entries() */
    120  const u32 pte_encode = vm->pte_encode(0, cache_level, flags);  /* in gen6_ppgtt_insert_entries() */
    166  gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);  /* in gen6_flush_pd() */
    172  static void gen6_alloc_va_range(struct i915_address_space *vm,
    176  struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));  /* in gen6_alloc_va_range() */
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/lib/

kvm_util.c:
    122  void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
    124  if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))  /* in vm_enable_dirty_ring() */
    125  vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
    127  vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);  /* in vm_enable_dirty_ring() */
    128  vm->dirty_ring_size = ring_size;  /* in vm_enable_dirty_ring() */
    131  static void vm_open(struct kvm_vm *vm)
    133  vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);  /* in vm_open() */
    137  vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);  /* in vm_open() */
    138  TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));  /* in vm_open() */
    190  struct kvm_vm *vm;  /* in ____vm_create() */
    [all …]

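vm_open() above wraps the raw KVM UAPI sequence: open /dev/kvm, then KVM_CREATE_VM to get a VM fd; vm_enable_dirty_ring() prefers the acquire/release dirty-ring capability and falls back to the older one. A standalone sketch of both without the selftest helpers (requires a recent linux/kvm.h; the ring size is illustrative, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Type 0 is the default VM type on most architectures. */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0UL);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* Same fallback idea as vm_enable_dirty_ring(): use the
	 * acquire/release flavor when the kernel reports it. */
	struct kvm_enable_cap cap;
	memset(&cap, 0, sizeof(cap));
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING_ACQ_REL) > 0)
		cap.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
	else
		cap.cap = KVM_CAP_DIRTY_LOG_RING;
	cap.args[0] = 65536;  /* ring size in bytes, illustrative */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		perror("KVM_ENABLE_CAP");

	close(vm_fd);
	close(kvm_fd);
	return 0;
}
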
/linux-6.1.9/drivers/gpu/drm/lima/

lima_vm.c:
    18  struct lima_vm *vm;
    35  static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
    43  vm->bts[pbe].cpu[bte] = 0;  /* in lima_vm_unmap_range() */
    47  static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
    52  if (!vm->bts[pbe].cpu) {  /* in lima_vm_map_page() */
    57  vm->bts[pbe].cpu = dma_alloc_wc(  /* in lima_vm_map_page() */
    58  vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
    59  &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
    60  if (!vm->bts[pbe].cpu)  /* in lima_vm_map_page() */
    63  pts = vm->bts[pbe].dma;  /* in lima_vm_map_page() */
    [all …]

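lima_vm_map_page() allocates a block of page tables only when the first mapping touches it (the !vm->bts[pbe].cpu test), then fills one entry. A standalone sketch of that lazily populated two-level table, with calloc() standing in for dma_alloc_wc() and made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed, made-up geometry: 16 top-level slots, 1024 entries each. */
#define NUM_BT      16
#define PTES_PER_BT 1024

struct toy_vm {
	uint32_t *bts[NUM_BT];  /* second-level tables, allocated on demand */
};

static int toy_map_page(struct toy_vm *vm, uint32_t pa, uint32_t va)
{
	uint32_t idx = va / PTES_PER_BT;  /* which second-level table */
	uint32_t off = va % PTES_PER_BT;  /* entry within it */

	if (idx >= NUM_BT)
		return -1;

	/* Allocate the table only on first use, zeroed like __GFP_ZERO. */
	if (!vm->bts[idx]) {
		vm->bts[idx] = calloc(PTES_PER_BT, sizeof(uint32_t));
		if (!vm->bts[idx])
			return -1;
	}

	vm->bts[idx][off] = pa | 1;  /* hypothetical "present" bit */
	return 0;
}

int main(void)
{
	struct toy_vm vm = { { 0 } };

	toy_map_page(&vm, 0x4000, 42);
	printf("pte[42] = 0x%x\n", vm.bts[0][42]);
	return 0;
}
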
/linux-6.1.9/drivers/virt/acrn/

vm.c:
    25  struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
    37  mutex_init(&vm->regions_mapping_lock);  /* in acrn_vm_create() */
    38  INIT_LIST_HEAD(&vm->ioreq_clients);  /* in acrn_vm_create() */
    39  spin_lock_init(&vm->ioreq_clients_lock);  /* in acrn_vm_create() */
    40  vm->vmid = vm_param->vmid;  /* in acrn_vm_create() */
    41  vm->vcpu_num = vm_param->vcpu_num;  /* in acrn_vm_create() */
    43  if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {  /* in acrn_vm_create() */
    45  vm->vmid = ACRN_INVALID_VMID;  /* in acrn_vm_create() */
    50  list_add(&vm->list, &acrn_vm_list);  /* in acrn_vm_create() */
    53  acrn_ioeventfd_init(vm);  /* in acrn_vm_create() */
    [all …]

irqfd.c:
    32  struct acrn_vm *vm;
    43  struct acrn_vm *vm = irqfd->vm;  /* in acrn_irqfd_inject() */
    45  acrn_msi_inject(vm, irqfd->msi.msi_addr,  /* in acrn_irqfd_inject() */
    53  lockdep_assert_held(&irqfd->vm->irqfds_lock);  /* in hsm_irqfd_shutdown() */
    65  struct acrn_vm *vm;  /* in hsm_irqfd_shutdown_work() */
    68  vm = irqfd->vm;  /* in hsm_irqfd_shutdown_work() */
    69  mutex_lock(&vm->irqfds_lock);  /* in hsm_irqfd_shutdown_work() */
    72  mutex_unlock(&vm->irqfds_lock);  /* in hsm_irqfd_shutdown_work() */
    81  struct acrn_vm *vm;  /* in hsm_irqfd_wakeup() */
    84  vm = irqfd->vm;  /* in hsm_irqfd_wakeup() */
    [all …]

ioreq.c:
    39  static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
    64  ret = hcall_notify_req_finish(vm->vmid, vcpu);  /* in ioreq_complete_request() */
    79  if (vcpu >= client->vm->vcpu_num)  /* in acrn_ioreq_complete_request() */
    84  acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;  /* in acrn_ioreq_complete_request() */
    88  ret = ioreq_complete_request(client->vm, vcpu, acrn_req);  /* in acrn_ioreq_complete_request() */
    93  int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
    97  spin_lock_bh(&vm->ioreq_clients_lock);  /* in acrn_ioreq_request_default_complete() */
    98  if (vm->default_client)  /* in acrn_ioreq_request_default_complete() */
    99  ret = acrn_ioreq_complete_request(vm->default_client,  /* in acrn_ioreq_request_default_complete() */
    101  spin_unlock_bh(&vm->ioreq_clients_lock);  /* in acrn_ioreq_request_default_complete() */
    [all …]

ioeventfd.c:
    43  static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
    45  lockdep_assert_held(&vm->ioeventfds_lock);  /* in acrn_ioeventfd_shutdown() */
    52  static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
    57  lockdep_assert_held(&vm->ioeventfds_lock);  /* in hsm_ioeventfd_is_conflict() */
    60  list_for_each_entry(p, &vm->ioeventfds, list)  /* in hsm_ioeventfd_is_conflict() */
    76  static int acrn_ioeventfd_assign(struct acrn_vm *vm,
    121  mutex_lock(&vm->ioeventfds_lock);  /* in acrn_ioeventfd_assign() */
    123  if (hsm_ioeventfd_is_conflict(vm, p)) {  /* in acrn_ioeventfd_assign() */
    129  ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,  /* in acrn_ioeventfd_assign() */
    134  list_add_tail(&p->list, &vm->ioeventfds);  /* in acrn_ioeventfd_assign() */
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/lib/s390x/

processor.c:
    13  void virt_arch_pgd_alloc(struct kvm_vm *vm)
    17  TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",  /* in virt_arch_pgd_alloc() */
    18  vm->page_size);
    20  if (vm->pgd_created)  /* in virt_arch_pgd_alloc() */
    23  paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,  /* in virt_arch_pgd_alloc() */
    25  memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);  /* in virt_arch_pgd_alloc() */
    27  vm->pgd = paddr;  /* in virt_arch_pgd_alloc() */
    28  vm->pgd_created = true;  /* in virt_arch_pgd_alloc() */
    36  static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
    40  taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,  /* in virt_alloc_region() */
    [all …]

/linux-6.1.9/drivers/gpu/drm/i915/selftests/

mock_gtt.c:
    27  static void mock_insert_page(struct i915_address_space *vm,
    35  static void mock_insert_entries(struct i915_address_space *vm,
    41  static void mock_bind_ppgtt(struct i915_address_space *vm,
    51  static void mock_unbind_ppgtt(struct i915_address_space *vm,
    56  static void mock_cleanup(struct i915_address_space *vm)
    60  static void mock_clear_range(struct i915_address_space *vm,
    73  ppgtt->vm.gt = to_gt(i915);  /* in mock_ppgtt() */
    74  ppgtt->vm.i915 = i915;  /* in mock_ppgtt() */
    75  ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);  /* in mock_ppgtt() */
    76  ppgtt->vm.dma = i915->drm.dev;  /* in mock_ppgtt() */
    [all …]

/linux-6.1.9/sound/pci/ctxfi/

ctvmem.c:
    30  get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
    36  if (size > vm->size) {  /* in get_vm_block() */
    42  mutex_lock(&vm->lock);  /* in get_vm_block() */
    43  list_for_each(pos, &vm->unused) {  /* in get_vm_block() */
    48  if (pos == &vm->unused)  /* in get_vm_block() */
    53  list_move(&entry->list, &vm->used);  /* in get_vm_block() */
    54  vm->size -= size;  /* in get_vm_block() */
    65  list_add(&block->list, &vm->used);  /* in get_vm_block() */
    68  vm->size -= size;  /* in get_vm_block() */
    71  mutex_unlock(&vm->lock);  /* in get_vm_block() */
    [all …]

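get_vm_block() walks a free list for the first block large enough, moves it wholesale on an exact fit, and otherwise carves off the front. A standalone first-fit sketch of that idea (hypothetical structures; the driver keeps kernel list_head lists under a mutex):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical free-list node. */
struct block {
	unsigned int addr, size;
	struct block *next;
};

/* First fit: take the whole block on an exact match, else split the front. */
static struct block *get_block(struct block **unused, unsigned int size)
{
	for (struct block **pp = unused; *pp; pp = &(*pp)->next) {
		struct block *b = *pp;

		if (b->size < size)
			continue;
		if (b->size == size) {
			*pp = b->next;  /* unlink, hand it out whole */
			return b;
		}
		/* Split: the allocation takes the front of the free block. */
		struct block *alloc = malloc(sizeof(*alloc));
		if (!alloc)
			return NULL;
		alloc->addr = b->addr;
		alloc->size = size;
		alloc->next = NULL;
		b->addr += size;
		b->size -= size;
		return alloc;
	}
	return NULL;  /* nothing big enough */
}

int main(void)
{
	struct block free0 = { .addr = 0, .size = 4096, .next = NULL };
	struct block *unused = &free0;

	struct block *a = get_block(&unused, 1024);
	printf("got [%u, +%u), free now [%u, +%u)\n",
	       a->addr, a->size, unused->addr, unused->size);
	free(a);
	return 0;
}
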
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/

amdgpu_vm.c:
    98  struct amdgpu_vm *vm;
    117  int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
    122  if (vm->pasid == pasid)  /* in amdgpu_vm_set_pasid() */
    125  if (vm->pasid) {  /* in amdgpu_vm_set_pasid() */
    126  r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));  /* in amdgpu_vm_set_pasid() */
    130  vm->pasid = 0;  /* in amdgpu_vm_set_pasid() */
    134  r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,  /* in amdgpu_vm_set_pasid() */
    139  vm->pasid = pasid;  /* in amdgpu_vm_set_pasid() */
    156  struct amdgpu_vm *vm = vm_bo->vm;  /* in amdgpu_vm_bo_evicted() */
    160  spin_lock(&vm_bo->vm->status_lock);  /* in amdgpu_vm_bo_evicted() */
    [all …]

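amdgpu_vm_set_pasid() keeps two views consistent: the pasid-to-vm lookup table (an XArray) and the vm->pasid field, with an early return when nothing changes and the old mapping erased before the new one is stored. A standalone sketch of the same update discipline, using a toy fixed-size map instead of an XArray (all names hypothetical):

#include <stdio.h>

#define MAX_PASID 64

struct toy_vm {
	unsigned int pasid;  /* 0 means "no pasid assigned" */
};

static struct toy_vm *pasid_map[MAX_PASID];  /* stands in for the XArray */

/* Point `pasid` at `vm`; pasid 0 unassigns. Keeps map and field in sync. */
static int set_pasid(struct toy_vm *vm, unsigned int pasid)
{
	if (pasid >= MAX_PASID)
		return -1;
	if (vm->pasid == pasid)
		return 0;                    /* nothing to do */

	if (vm->pasid) {
		pasid_map[vm->pasid] = NULL; /* drop the old mapping first */
		vm->pasid = 0;
	}
	if (pasid) {
		pasid_map[pasid] = vm;       /* install the new mapping */
		vm->pasid = pasid;
	}
	return 0;
}

int main(void)
{
	struct toy_vm vm = { 0 };

	set_pasid(&vm, 5);
	printf("pasid_map[5] %s &vm\n", pasid_map[5] == &vm ? "==" : "!=");
	set_pasid(&vm, 0);  /* unassign */
	printf("pasid_map[5] = %p\n", (void *)pasid_map[5]);
	return 0;
}
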
/linux-6.1.9/drivers/gpu/drm/i915/display/

intel_dpt.c:
    16  struct i915_address_space vm;
    23  #define i915_is_dpt(vm) ((vm)->is_dpt)
    26  i915_vm_to_dpt(struct i915_address_space *vm)
    28  BUILD_BUG_ON(offsetof(struct i915_dpt, vm));  /* in i915_vm_to_dpt() */
    29  GEM_BUG_ON(!i915_is_dpt(vm));  /* in i915_vm_to_dpt() */
    30  return container_of(vm, struct i915_dpt, vm);  /* in i915_vm_to_dpt() */
    33  #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
    40  static void dpt_insert_page(struct i915_address_space *vm,
    46  struct i915_dpt *dpt = i915_vm_to_dpt(vm);  /* in dpt_insert_page() */
    50  vm->pte_encode(addr, level, flags));  /* in dpt_insert_page() */
    [all …]

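i915_vm_to_dpt() recovers the containing i915_dpt from a pointer to its embedded i915_address_space; the BUILD_BUG_ON(offsetof(...)) pins the member at offset zero so the conversion is effectively a cast. A standalone sketch of that container_of idiom (toy types, not the i915 ones):

#include <stddef.h>
#include <stdio.h>

/* Recover the address of the struct that embeds `member`. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { int rpm; };

struct car {
	struct engine eng;  /* embedded at offset 0, like vm in i915_dpt */
	const char *name;
};

static void rev(struct engine *e)
{
	/* Given only the inner pointer, climb back to the outer struct. */
	struct car *c = container_of(e, struct car, eng);

	printf("%s revs to %d rpm\n", c->name, e->rpm);
}

int main(void)
{
	struct car c = { .eng = { .rpm = 7000 }, .name = "toy" };

	rev(&c.eng);
	return 0;
}
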
/linux-6.1.9/drivers/gpu/drm/radeon/

radeon_vm.c:
    130  struct radeon_vm *vm,  /* in radeon_vm_get_bos() */
    136  list = kvmalloc_array(vm->max_pde_used + 2,  /* in radeon_vm_get_bos() */
    142  list[0].robj = vm->page_directory;  /* in radeon_vm_get_bos() */
    145  list[0].tv.bo = &vm->page_directory->tbo;  /* in radeon_vm_get_bos() */
    150  for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {  /* in radeon_vm_get_bos() */
    151  if (!vm->page_tables[i].bo)  /* in radeon_vm_get_bos() */
    154  list[idx].robj = vm->page_tables[i].bo;  /* in radeon_vm_get_bos() */
    179  struct radeon_vm *vm, int ring)  /* in radeon_vm_grab_id() */
    182  struct radeon_vm_id *vm_id = &vm->ids[ring];  /* in radeon_vm_grab_id() */
    238  struct radeon_vm *vm,  /* in radeon_vm_flush() */
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/lib/riscv/

processor.c:
    16  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
    18  return (v + vm->page_size) & ~(vm->page_size - 1);  /* in page_align() */
    21  static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
    27  static uint64_t ptrs_per_pte(struct kvm_vm *vm)
    46  static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
    50  TEST_ASSERT(level < vm->pgtable_levels,  /* in pte_index() */
    56  void virt_arch_pgd_alloc(struct kvm_vm *vm)
    58  if (!vm->pgd_created) {  /* in virt_arch_pgd_alloc() */
    59  vm_paddr_t paddr = vm_phy_pages_alloc(vm,  /* in virt_arch_pgd_alloc() */
    60  page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size,  /* in virt_arch_pgd_alloc() */
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/x86_64/

nx_huge_pages_test.c:
    77  static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
    81  actual_pages_2m = vm_get_stat(vm, "pages_2m");  /* in check_2m_page_count() */
    88  static void check_split_count(struct kvm_vm *vm, int expected_splits)
    92  actual_splits = vm_get_stat(vm, "nx_lpage_splits");  /* in check_split_count() */
    114  struct kvm_vm *vm;  /* in run_test() */
    119  vm = vm_create(1);  /* in run_test() */
    122  r = __vm_disable_nx_huge_pages(vm);  /* in run_test() */
    132  vcpu = vm_vcpu_add(vm, 0, guest_code);  /* in run_test() */
    134  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,  /* in run_test() */
    138  nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size;  /* in run_test() */
    [all …]

/linux-6.1.9/tools/testing/selftests/kvm/include/

kvm_util_base.h:
    51  struct kvm_vm *vm;
    98  #define kvm_for_each_vcpu(vm, i, vcpu) \
    99  for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
    100  if (!((vcpu) = vm->vcpus[i])) \
    105  memslot2region(struct kvm_vm *vm, uint32_t memslot);
    215  static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
    217  #define __vm_ioctl(vm, cmd, arg) \
    219  static_assert_is_vm(vm); \
    220  kvm_do_ioctl((vm)->fd, cmd, arg); \
    223  #define _vm_ioctl(vm, cmd, name, arg) \
    [all …]

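kvm_for_each_vcpu at line 98 is a for-plus-if iterator macro: it walks every ID up to last_vcpu_id and the if skips unpopulated slots. The listing cuts the macro off mid-definition; the toy completion below (continue on a NULL slot, else run the caller's body) reproduces the usual shape of such iterators, with hypothetical types:

#include <stdio.h>

#define MAX_VCPUS 8

struct toy_vcpu { int id; };

struct toy_vm {
	int last_vcpu_id;
	struct toy_vcpu *vcpus[MAX_VCPUS];
};

/* for-plus-if macro: skip NULL slots, run the caller's body otherwise.
 * The trailing `else` binds the body to this if, not to a caller's. */
#define for_each_vcpu(vm, i, vcpu)                        \
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)   \
		if (!((vcpu) = (vm)->vcpus[i]))           \
			continue;                         \
		else

int main(void)
{
	struct toy_vcpu a = { .id = 0 }, c = { .id = 2 };
	struct toy_vm vm = { .last_vcpu_id = 2, .vcpus = { &a, NULL, &c } };
	struct toy_vcpu *vcpu;
	int i;

	for_each_vcpu(&vm, i, vcpu)
		printf("vcpu %d present\n", vcpu->id);  /* slot 1 is skipped */
	return 0;
}
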
/linux-6.1.9/tools/testing/selftests/kvm/lib/x86_64/

vmx.c:
    75  vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
    77  vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);  /* in vcpu_alloc_vmx() */
    78  struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);  /* in vcpu_alloc_vmx() */
    81  vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);  /* in vcpu_alloc_vmx() */
    82  vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);  /* in vcpu_alloc_vmx() */
    83  vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);  /* in vcpu_alloc_vmx() */
    86  vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);  /* in vcpu_alloc_vmx() */
    87  vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);  /* in vcpu_alloc_vmx() */
    88  vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);  /* in vcpu_alloc_vmx() */
    91  vmx->msr = (void *)vm_vaddr_alloc_page(vm);  /* in vcpu_alloc_vmx() */
    [all …]

processor.c:
    122  void virt_arch_pgd_alloc(struct kvm_vm *vm)
    124  TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "  /* in virt_arch_pgd_alloc() */
    125  "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
    128  if (!vm->pgd_created) {  /* in virt_arch_pgd_alloc() */
    129  vm->pgd = vm_alloc_page_table(vm);  /* in virt_arch_pgd_alloc() */
    130  vm->pgd_created = true;  /* in virt_arch_pgd_alloc() */
    134  static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
    137  uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);  /* in virt_get_pte() */
    143  static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
    150  uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);  /* in virt_create_upper_pte() */
    [all …]

/linux-6.1.9/drivers/gpu/drm/imx/dcss/

dcss-ss.c:
    126  void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
    135  lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +  /* in dcss_ss_sync_set() */
    136  vm->hactive - 1;
    137  lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +  /* in dcss_ss_sync_set() */
    138  vm->vactive - 1;
    142  hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +  /* in dcss_ss_sync_set() */
    143  vm->hactive - 1;
    144  hsync_end = vm->hsync_len - 1;  /* in dcss_ss_sync_set() */
    150  vsync_start = vm->vfront_porch - 1;  /* in dcss_ss_sync_set() */
    151  vsync_end = vm->vfront_porch + vm->vsync_len - 1;  /* in dcss_ss_sync_set() */
    [all …]

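In this last hit, vm is a struct videomode (display timings), not a virtual machine: lrc_x/lrc_y are the last raster column and row, i.e. total extent minus one. A standalone sketch evaluating those expressions with standard CEA-861 1920x1080@60 timings (assumed example values; the struct is a cut-down analogue, not the kernel's):

#include <stdio.h>

/* Cut-down analogue of struct videomode's timing fields. */
struct mode {
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
	/* CEA-861 1080p60: 2200x1125 total raster. */
	struct mode vm = {
		.hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
		.vactive = 1080, .vfront_porch = 4,  .vback_porch = 36,  .vsync_len = 5,
	};

	/* Last raster column/row: total extent minus one. */
	unsigned int lrc_x = vm.hfront_porch + vm.hback_porch + vm.hsync_len +
			     vm.hactive - 1;  /* 2200 - 1 = 2199 */
	unsigned int lrc_y = vm.vfront_porch + vm.vback_porch + vm.vsync_len +
			     vm.vactive - 1;  /* 1125 - 1 = 1124 */

	printf("lrc = (%u, %u)\n", lrc_x, lrc_y);
	return 0;
}
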