
Searched refs:fences (Results 1 – 25 of 64) sorted by relevance


/linux-6.6.21/drivers/gpu/drm/i915/
i915_deps.c
38 if (deps->fences != &deps->single) in i915_deps_reset_fences()
39 kfree(deps->fences); in i915_deps_reset_fences()
42 deps->fences = &deps->single; in i915_deps_reset_fences()
52 deps->fences = NULL; in i915_deps_init()
69 dma_fence_put(deps->fences[i]); in i915_deps_fini()
71 if (deps->fences != &deps->single) in i915_deps_fini()
72 kfree(deps->fences); in i915_deps_fini()
89 memcpy(new_fences, deps->fences, in i915_deps_grow()
91 swap(new_fences, deps->fences); in i915_deps_grow()
96 deps->fences[deps->num_deps++] = dma_fence_get(fence); in i915_deps_grow()
[all …]
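
Note: the hits above show i915_deps' small-array optimization: a one-element
inline slot (&deps->single) is used until more fences arrive, after which the
array moves to the heap, so teardown must check which storage is live before
kfree(). A minimal sketch of that shape, with hypothetical names (my_deps and
my_deps_fini are not kernel symbols):

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_deps {
	struct dma_fence **fences;	/* points at &single or a kmalloc'ed array */
	struct dma_fence *single;
	unsigned int num_deps;
};

static void my_deps_fini(struct my_deps *deps)
{
	unsigned int i;

	for (i = 0; i < deps->num_deps; i++)
		dma_fence_put(deps->fences[i]);	/* drop each held reference */

	if (deps->fences != &deps->single)
		kfree(deps->fences);		/* only the spilled array is heap memory */
}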
/linux-6.6.21/drivers/dma-buf/
dma-resv.c
142 RCU_INIT_POINTER(obj->fences, NULL); in dma_resv_init()
156 dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); in dma_resv_fini()
164 return rcu_dereference_check(obj->fences, dma_resv_held(obj)); in dma_resv_fences_list()
228 rcu_assign_pointer(obj->fences, new); in dma_resv_reserve_fences()
258 struct dma_resv_list *fences = dma_resv_fences_list(obj); in dma_resv_reset_max_fences() local
263 if (fences) in dma_resv_reset_max_fences()
264 fences->max_fences = fences->num_fences; in dma_resv_reset_max_fences()
363 cursor->fences = dma_resv_fences_list(cursor->obj); in dma_resv_iter_restart_unlocked()
364 if (cursor->fences) in dma_resv_iter_restart_unlocked()
365 cursor->num_fences = cursor->fences->num_fences; in dma_resv_iter_restart_unlocked()
[all …]
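
Note: dma_resv_fences_list() is internal; the usual consumer-side pattern is
the iterator seen in dma_resv_iter_restart_unlocked() above. A hedged sketch
of the locked variant (my_count_active is hypothetical; the caller must hold
the reservation lock via dma_resv_lock()):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static unsigned int my_count_active(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	/* walks all usage classes up to and including BOOKKEEP */
	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (!dma_fence_is_signaled(fence))
			count++;
	}
	return count;
}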
dma-fence-unwrap.c
64 struct dma_fence **fences, in __dma_fence_unwrap_merge() argument
76 dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { in __dma_fence_unwrap_merge()
106 fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]); in __dma_fence_unwrap_merge()
117 while (fences[i] && dma_fence_is_signaled(fences[i])) in __dma_fence_unwrap_merge()
118 fences[i] = dma_fence_unwrap_next(&iter[i]); in __dma_fence_unwrap_merge()
120 next = fences[i]; in __dma_fence_unwrap_merge()
139 fences[i] = dma_fence_unwrap_next(&iter[i]); in __dma_fence_unwrap_merge()
142 fences[sel] = dma_fence_unwrap_next(&iter[sel]); in __dma_fence_unwrap_merge()
149 fences[sel] = dma_fence_unwrap_next(&iter[sel]); in __dma_fence_unwrap_merge()
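
Note: callers normally reach __dma_fence_unwrap_merge() through the
dma_fence_unwrap_merge() macro in <linux/dma-fence-unwrap.h>, which flattens
chains/arrays and skips already-signaled fences before building the merged
result. A short usage sketch (my_combine is hypothetical):

#include <linux/dma-fence-unwrap.h>

static struct dma_fence *my_combine(struct dma_fence *a, struct dma_fence *b)
{
	/* returns a new reference representing both fences, or NULL on OOM */
	return dma_fence_unwrap_merge(a, b);
}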
dma-fence-array.c
87 if (dma_fence_add_callback(array->fences[i], &cb[i].cb, in dma_fence_array_enable_signaling()
89 int error = array->fences[i]->error; in dma_fence_array_enable_signaling()
120 dma_fence_put(array->fences[i]); in dma_fence_array_release()
122 kfree(array->fences); in dma_fence_array_release()
133 dma_fence_set_deadline(array->fences[i], deadline); in dma_fence_array_set_deadline()
166 struct dma_fence **fences, in dma_fence_array_create() argument
173 WARN_ON(!num_fences || !fences); in dma_fence_array_create()
188 array->fences = fences; in dma_fence_array_create()
204 WARN_ON(dma_fence_is_container(fences[num_fences])); in dma_fence_array_create()
228 if (array->fences[i]->context != context) in dma_fence_match_context()
[all …]
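
Note: dma_fence_array_create() takes over both the kmalloc'ed pointer array
and the fence references on success; the caller cleans up only on failure,
exactly as the amdgpu_ids.c hits below do. A minimal sketch under that
contract (my_merge_two is hypothetical):

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *my_merge_two(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	/* signal_on_any == false: the array signals once both fences do */
	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1, false);
	if (!array) {
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}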
st-dma-fence-chain.c
102 struct dma_fence **fences; member
124 fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init()
126 if (!fc->fences) { in fence_chains_init()
133 fc->fences[i] = mock_fence(); in fence_chains_init()
134 if (!fc->fences[i]) { in fence_chains_init()
140 fc->fences[i], in fence_chains_init()
157 dma_fence_put(fc->fences[i]); in fence_chains_init()
160 kvfree(fc->fences); in fence_chains_init()
171 dma_fence_signal(fc->fences[i]); in fence_chains_fini()
172 dma_fence_put(fc->fences[i]); in fence_chains_fini()
[all …]
st-dma-fence-unwrap.c
49 struct dma_fence **fences; in mock_array() local
53 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in mock_array()
54 if (!fences) in mock_array()
59 fences[i] = va_arg(valist, typeof(*fences)); in mock_array()
62 array = dma_fence_array_create(num_fences, fences, in mock_array()
70 kfree(fences); in mock_array()
75 dma_fence_put(va_arg(valist, typeof(*fences))); in mock_array()
st-dma-resv.c
228 cursor.fences = (void*)~0; in test_for_each_unlocked()
247 struct dma_fence *f, **fences = NULL; in test_get_fences() local
274 r = dma_resv_get_fences(&resv, usage, &i, &fences); in test_get_fences()
280 if (i != 1 || fences[0] != f) { in test_get_fences()
288 dma_fence_put(fences[i]); in test_get_fences()
289 kfree(fences); in test_get_fences()
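
Note: lines 288–289 above show the caller-side contract of
dma_resv_get_fences(): on success the caller owns one reference per returned
fence plus the array itself. A hedged sketch (my_snapshot is hypothetical):

#include <linux/dma-resv.h>
#include <linux/slab.h>

static int my_snapshot(struct dma_resv *obj)
{
	struct dma_fence **fences;
	unsigned int i, count;
	int r;

	/* no lock needed; the snapshot is taken safely inside the helper */
	r = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &count, &fences);
	if (r)
		return r;

	/* ... inspect fences[0..count-1] ... */

	for (i = 0; i < count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return 0;
}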
dma-fence.c
811 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, in dma_fence_test_signaled_any() argument
817 struct dma_fence *fence = fences[i]; in dma_fence_test_signaled_any()
848 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, in dma_fence_wait_any_timeout() argument
855 if (WARN_ON(!fences || !count || timeout < 0)) in dma_fence_wait_any_timeout()
860 if (dma_fence_is_signaled(fences[i])) { in dma_fence_wait_any_timeout()
876 struct dma_fence *fence = fences[i]; in dma_fence_wait_any_timeout()
894 if (dma_fence_test_signaled_any(fences, count, idx)) in dma_fence_wait_any_timeout()
907 dma_fence_remove_callback(fences[i], &cb[i].base); in dma_fence_wait_any_timeout()
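
Note: dma_fence_wait_any_timeout() returns the remaining timeout in jiffies,
0 on timeout, or a negative errno, and optionally reports which fence fired
first. A minimal wrapper sketch (my_wait_first is hypothetical):

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_first(struct dma_fence **fences, uint32_t count,
			 uint32_t *idx)
{
	signed long t;

	t = dma_fence_wait_any_timeout(fences, count, true,
				       msecs_to_jiffies(100), idx);
	if (t < 0)
		return t;		/* e.g. -ERESTARTSYS when interrupted */
	if (t == 0)
		return -ETIMEDOUT;	/* nothing signaled within 100 ms */
	return 0;			/* fences[*idx] signaled first */
}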
st-dma-fence.c
446 struct dma_fence __rcu **fences; member
477 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
482 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
514 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
538 t[i].fences = f; in race_signal_callback()
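
Note: the race_signal_callback test above exercises dma_fence_get_rcu_safe(),
which re-reads an RCU-protected fence slot until it holds a reference to a
pointer that is still current. A hedged sketch of the reader side (my_peek is
hypothetical):

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

static struct dma_fence *my_peek(struct dma_fence __rcu **slot)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(slot);	/* NULL if the slot is empty */
	rcu_read_unlock();

	return fence;				/* caller puts the reference */
}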
/linux-6.6.21/drivers/gpu/host1x/
intr.c
35 if (!list_empty(&sp->fences.list)) { in host1x_intr_update_hw_state()
36 fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list); in host1x_intr_update_hw_state()
47 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_add_fence_locked()
57 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_remove_fence()
83 spin_lock(&sp->fences.lock); in host1x_intr_handle_interrupt()
85 list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) { in host1x_intr_handle_interrupt()
98 spin_unlock(&sp->fences.lock); in host1x_intr_handle_interrupt()
110 spin_lock_init(&syncpt->fences.lock); in host1x_intr_init()
111 INIT_LIST_HEAD(&syncpt->fences.list); in host1x_intr_init()
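
Note: host1x keeps pending fences on a plain list under a spinlock, ordered by
syncpoint threshold; the interrupt path walks it with the _safe iterator
because signaling unlinks entries. A simplified sketch of that shape
(my_fence_list, my_signal_up_to, and the threshold field are hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_fence {
	struct list_head node;
	u32 threshold;
};

struct my_fence_list {
	spinlock_t lock;
	struct list_head list;
};

static void my_signal_up_to(struct my_fence_list *fl, u32 value)
{
	struct my_fence *f, *tmp;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(f, tmp, &fl->list, node) {
		if (f->threshold > value)
			break;		/* list is kept sorted, stop early */
		list_del(&f->node);
		/* ... signal the dma_fence embedded alongside f ... */
	}
	spin_unlock(&fl->lock);
}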
debug.c
96 spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
97 list_for_each(pos, &m->syncpt[i].fences.list) in show_syncpts()
99 spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
/linux-6.6.21/Documentation/driver-api/
sync_file.rst
9 the fences(struct dma_fence) that are needed to synchronize between drivers or
29 in-fences and out-fences
33 the driver to userspace we call the fences it contains 'out-fences'. They are
37 Out-fences are fences that the driver creates.
40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
42 the in-fences.
72 of the Sync File to the kernel. The kernel can then retrieve the fences
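
Note: the out-fence flow the document describes is typically implemented by
wrapping a driver fence in a sync_file and installing it as a file descriptor.
A hedged sketch using the real <linux/sync_file.h> entry point
(my_export_out_fence is hypothetical):

#include <linux/sync_file.h>
#include <linux/file.h>
#include <linux/fcntl.h>

static int my_export_out_fence(struct dma_fence *fence)
{
	struct sync_file *sync_file = sync_file_create(fence);
	int fd;

	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);	/* drops the sync_file */
		return fd;
	}
	fd_install(fd, sync_file->file);

	return fd;	/* handed to userspace as the out-fence */
}

The in-fence direction is the inverse: sync_file_get_fence(fd) returns a
dma_fence reference taken from a userspace-supplied file descriptor.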
dma-buf.rst
153 :doc: DMA fences overview
227 * Future fences, used in HWC1 to signal when a buffer isn't used by the display
231 * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
234 * Userspace fences or gpu futexes, fine-grained locking within a command buffer
240 batch DMA fences for memory management instead of context preemption DMA
241 fences which get reattached when the compute job is rescheduled.
244 fences and controls when they fire. Mixing indefinite fences with normal
245 in-kernel DMA fences does not work, even when a fallback timeout is included to
251 * Only userspace knows about all dependencies in indefinite fences and when
255 for memory management needs, which means we must support indefinite fences being
[all …]
/linux-6.6.21/drivers/gpu/drm/amd/amdgpu/
amdgpu_sync.c
54 hash_init(sync->fences); in amdgpu_sync_create()
137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
169 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
287 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
327 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
358 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
386 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_push_to_job()
409 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
433 hash_for_each_safe(sync->fences, i, tmp, e, node) in amdgpu_sync_free()
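
Note: amdgpu_sync deduplicates by dma_fence context: a hashtable keyed on
f->context holds at most one fence per timeline, and a newer fence on the
same timeline replaces an older one. A hedged sketch of that idea (the
my_sync* names are hypothetical):

#include <linux/dma-fence.h>
#include <linux/hashtable.h>

struct my_sync_entry {
	struct hlist_node node;
	struct dma_fence *fence;
};

struct my_sync {
	DECLARE_HASHTABLE(fences, 4);	/* 16 buckets, as a toy size */
};

/* returns true if the timeline was already tracked (entry updated in place) */
static bool my_sync_add_later(struct my_sync *sync, struct dma_fence *f)
{
	struct my_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (e->fence->context != f->context)
			continue;	/* bucket collision, other timeline */

		if (dma_fence_is_later(f, e->fence)) {
			dma_fence_put(e->fence);
			e->fence = dma_fence_get(f);
		}
		return true;
	}
	return false;			/* caller allocates and hash_add()s */
}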
amdgpu_ids.c
207 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local
215 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); in amdgpu_vmid_grab_idle()
216 if (!fences) in amdgpu_vmid_grab_idle()
226 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); in amdgpu_vmid_grab_idle()
227 if (!fences[i]) in amdgpu_vmid_grab_idle()
241 dma_fence_get(fences[j]); in amdgpu_vmid_grab_idle()
243 array = dma_fence_array_create(i, fences, fence_context, in amdgpu_vmid_grab_idle()
247 dma_fence_put(fences[j]); in amdgpu_vmid_grab_idle()
248 kfree(fences); in amdgpu_vmid_grab_idle()
257 kfree(fences); in amdgpu_vmid_grab_idle()
amdgpu_fence.c
186 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
300 ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
351 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
428 fence = drv->fences[last_seq]; in amdgpu_fence_last_unsignaled_time_us()
452 fence = drv->fences[seq]; in amdgpu_fence_update_start_timestamp()
528 ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
531 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
664 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
665 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
666 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
[all …]
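
Note: amdgpu stores ring fences in a power-of-two array indexed by
"seq & num_fences_mask", so a slot is reused after num_fences submissions.
The real driver manages the slot more carefully (RCU, waiting on the
displaced fence); this sketch only illustrates the masking arithmetic
(my_* names are hypothetical):

#include <linux/dma-fence.h>
#include <linux/types.h>

struct my_fence_ring {
	struct dma_fence **fences;	/* kcalloc'ed, power-of-two length */
	u32 num_fences_mask;		/* length - 1 */
	u32 seq;
};

static void my_ring_store(struct my_fence_ring *ring, struct dma_fence *fence)
{
	struct dma_fence **ptr;

	ptr = &ring->fences[++ring->seq & ring->num_fences_mask];
	dma_fence_put(*ptr);		/* dma_fence_put(NULL) is a no-op */
	*ptr = dma_fence_get(fence);
}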
amdgpu_ctx.c
197 res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i])); in amdgpu_ctx_entity_time()
214 entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), in amdgpu_ctx_init_entity()
281 res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i])); in amdgpu_ctx_fini_entity()
282 dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity()
758 other = centity->fences[idx]; in amdgpu_ctx_add_fence()
764 centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
798 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
860 other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence()
amdgpu_jpeg.c
79 unsigned int fences = 0; in amdgpu_jpeg_idle_work_handler() local
87 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]); in amdgpu_jpeg_idle_work_handler()
90 if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) in amdgpu_jpeg_idle_work_handler()
/linux-6.6.21/drivers/gpu/drm/i915/selftests/
i915_sw_fence.c
453 struct i915_sw_fence **fences; in test_chain() local
457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
458 if (!fences) in test_chain()
462 fences[i] = alloc_fence(); in test_chain()
463 if (!fences[i]) { in test_chain()
470 ret = i915_sw_fence_await_sw_fence_gfp(fences[i], in test_chain()
471 fences[i - 1], in test_chain()
478 i915_sw_fence_commit(fences[i]); in test_chain()
484 if (i915_sw_fence_done(fences[i])) { in test_chain()
490 i915_sw_fence_commit(fences[0]); in test_chain()
[all …]
/linux-6.6.21/drivers/gpu/drm/
drm_suballoc.c
225 struct dma_fence **fences, in drm_suballoc_next_hole() argument
248 fences[i] = NULL; in drm_suballoc_next_hole()
257 fences[i] = sa->fence; in drm_suballoc_next_hole()
316 struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES]; in drm_suballoc_new() local
353 } while (drm_suballoc_next_hole(sa_manager, fences, tries)); in drm_suballoc_new()
356 if (fences[i]) in drm_suballoc_new()
357 fences[count++] = dma_fence_get(fences[i]); in drm_suballoc_new()
363 t = dma_fence_wait_any_timeout(fences, count, intr, in drm_suballoc_new()
367 dma_fence_put(fences[i]); in drm_suballoc_new()
/linux-6.6.21/include/linux/
dma-fence-array.h
43 struct dma_fence **fences; member
80 struct dma_fence **fences,
dma-resv.h
178 struct dma_resv_list __rcu *fences; member
210 struct dma_resv_list *fences; member
476 unsigned int *num_fences, struct dma_fence ***fences);
/linux-6.6.21/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
312 struct eb_fence *fences; member
2796 __free_fence_array(struct eb_fence *fences, unsigned int n) in __free_fence_array() argument
2799 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); in __free_fence_array()
2800 dma_fence_put(fences[n].dma_fence); in __free_fence_array()
2801 dma_fence_chain_free(fences[n].chain_fence); in __free_fence_array()
2803 kvfree(fences); in __free_fence_array()
2835 f = krealloc(eb->fences, in add_timeline_fence_array()
2841 eb->fences = f; in add_timeline_fence_array()
2964 f = krealloc(eb->fences, in add_fence_array()
2970 eb->fences = f; in add_fence_array()
[all …]
/linux-6.6.21/drivers/gpu/drm/virtio/
virtgpu_fence.c
111 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit()
136 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
146 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
/linux-6.6.21/drivers/gpu/drm/radeon/
radeon_trace.h
36 __field(u32, fences)
42 __entry->fences = radeon_fence_count_emitted(
47 __entry->fences)
