/linux-6.1.9/drivers/gpu/drm/tests/
D | drm_framebuffer_test.c |
      31  .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
      36  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
      41  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
      46  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
      51  .handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
      56  .handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
      61  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
      66  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
      71  .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
      76  .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 },
      [all …]
|
/linux-6.1.9/drivers/staging/media/atomisp/pci/runtime/rmgr/src/ |
D | rmgr_vbuf.c |
     143  pool->handles = kvmalloc(bytes_needed, GFP_KERNEL);  in ia_css_rmgr_init_vbuf()
     144  if (pool->handles)  in ia_css_rmgr_init_vbuf()
     145  memset(pool->handles, 0, bytes_needed);  in ia_css_rmgr_init_vbuf()
     151  pool->handles = NULL;  in ia_css_rmgr_init_vbuf()
     170  if (pool->handles) {  in ia_css_rmgr_uninit_vbuf()
     173  if (pool->handles[i]) {  in ia_css_rmgr_uninit_vbuf()
     176  pool->handles[i]->vptr,  in ia_css_rmgr_uninit_vbuf()
     177  pool->handles[i]->count);  in ia_css_rmgr_uninit_vbuf()
     179  hmm_free(pool->handles[i]->vptr);  in ia_css_rmgr_uninit_vbuf()
     181  ia_css_rmgr_refcount_release_vbuf(&pool->handles[i]);  in ia_css_rmgr_uninit_vbuf()
     [all …]
|
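The rmgr_vbuf.c init path above allocates the handle table with kvmalloc() and then zeroes it with a separate memset(). As a hedged illustration only (the vbuf_pool type and field names below are invented for the sketch, not the driver's real structures), the same result can come from a single zeroing allocation:

    #include <linux/mm.h>      /* kvcalloc(), kvzalloc(), kvfree() */
    #include <linux/errno.h>
    #include <linux/types.h>

    struct vbuf_handle;        /* opaque for this sketch */

    /* Hypothetical pool, reduced to the handle table managed above. */
    struct vbuf_pool {
        struct vbuf_handle **handles;
        u32 size;
    };

    static int vbuf_pool_alloc_handles(struct vbuf_pool *pool, u32 count)
    {
        /*
         * kvcalloc() (or kvzalloc() for a precomputed byte count) returns
         * already-zeroed memory, folding the kvmalloc()+memset(0) pair
         * into one call and guarding the size multiplication.
         */
        pool->handles = kvcalloc(count, sizeof(*pool->handles), GFP_KERNEL);
        if (!pool->handles)
            return -ENOMEM;

        pool->size = count;
        return 0;
    }

    static void vbuf_pool_free_handles(struct vbuf_pool *pool)
    {
        kvfree(pool->handles);
        pool->handles = NULL;
    }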
/linux-6.1.9/drivers/gpu/drm/lima/ |
D | lima_ctx.c |
      26  err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);  in lima_ctx_create()
      58  ctx = xa_erase(&mgr->handles, id);  in lima_ctx_free()
      72  ctx = xa_load(&mgr->handles, id);  in lima_ctx_get()
      87  xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);  in lima_ctx_mgr_init()
      95  xa_for_each(&mgr->handles, id, ctx) {  in lima_ctx_mgr_fini()
      99  xa_destroy(&mgr->handles);  in lima_ctx_mgr_fini()
|
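The lima_ctx.c hits above outline an XArray-backed handle allocator: xa_init_flags() with XA_FLAGS_ALLOC, xa_alloc() to hand out an id, xa_load()/xa_erase() for lookup and removal, and xa_for_each()/xa_destroy() on teardown. A minimal sketch of that pattern (the my_ctx names are invented; lima's real structures carry extra state and reference counting):

    #include <linux/xarray.h>
    #include <linux/slab.h>

    struct my_ctx {
        u32 flags;
    };

    struct my_ctx_mgr {
        struct xarray handles;          /* id -> struct my_ctx * */
    };

    static void my_ctx_mgr_init(struct my_ctx_mgr *mgr)
    {
        /* XA_FLAGS_ALLOC enables xa_alloc()-style id allocation. */
        xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
    }

    static int my_ctx_create(struct my_ctx_mgr *mgr, u32 *id)
    {
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        int err;

        if (!ctx)
            return -ENOMEM;

        /* Assign a free 32-bit id and store ctx under it. */
        err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
        if (err)
            kfree(ctx);

        return err;
    }

    static struct my_ctx *my_ctx_get(struct my_ctx_mgr *mgr, u32 id)
    {
        return xa_load(&mgr->handles, id);      /* NULL if id is unused */
    }

    static void my_ctx_free(struct my_ctx_mgr *mgr, u32 id)
    {
        kfree(xa_erase(&mgr->handles, id));     /* kfree(NULL) is a no-op */
    }

    static void my_ctx_mgr_fini(struct my_ctx_mgr *mgr)
    {
        struct my_ctx *ctx;
        unsigned long id;

        xa_for_each(&mgr->handles, id, ctx)
            kfree(ctx);

        xa_destroy(&mgr->handles);
    }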
/linux-6.1.9/drivers/misc/habanalabs/common/ |
D | memory_mgr.c |
      25  buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));  in hl_mmap_mem_buf_get()
      68  idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));  in hl_mmap_mem_buf_release()
      87  idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));  in hl_mmap_mem_buf_remove_idr_locked()
     119  buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));  in hl_mmap_mem_buf_put_handle()
     161  rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);  in hl_mmap_mem_buf_alloc()
     186  idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));  in hl_mmap_mem_buf_alloc()
     319  idr_init(&mmg->handles);
     336  idp = &mmg->handles;
     348  idr_destroy(&mmg->handles);
|
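Both habanalabs files above (memory_mgr.c and context.c) wrap an IDR to map small integer handles to kernel objects: idr_init() at setup, idr_alloc() starting at 1 so that 0 can mean "no handle", idr_find() for lookup, idr_remove() on release, idr_destroy() at teardown. A hedged sketch of that lifecycle (types and locking are simplified and the names invented; the habanalabs code additionally scales handles by PAGE_SHIFT and reference-counts the buffers):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    struct buf_obj {
        int payload;
    };

    /* Hypothetical buffer manager; hl_mem_mgr keeps more state than this. */
    struct buf_mgr {
        struct idr handles;     /* handle -> struct buf_obj * */
        spinlock_t lock;        /* the IDR needs external locking */
    };

    static void buf_mgr_init(struct buf_mgr *mmg)
    {
        spin_lock_init(&mmg->lock);
        idr_init(&mmg->handles);
    }

    static int buf_obj_register(struct buf_mgr *mmg, struct buf_obj *buf)
    {
        int id;

        spin_lock(&mmg->lock);
        /* Allocate an id >= 1; 0 stays reserved as "no handle". */
        id = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
        spin_unlock(&mmg->lock);

        return id;              /* negative errno on failure */
    }

    static struct buf_obj *buf_obj_lookup(struct buf_mgr *mmg, int id)
    {
        struct buf_obj *buf;

        spin_lock(&mmg->lock);
        buf = idr_find(&mmg->handles, id);
        spin_unlock(&mmg->lock);

        return buf;
    }

    static void buf_obj_unregister(struct buf_mgr *mmg, int id)
    {
        spin_lock(&mmg->lock);
        idr_remove(&mmg->handles, id);
        spin_unlock(&mmg->lock);
    }

    static void buf_mgr_fini(struct buf_mgr *mmg)
    {
        idr_destroy(&mmg->handles);
    }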
D | context.c |
      19  idr_remove(&mgr->handles, handle->id);  in hl_encaps_handle_do_release()
      39  idr_remove(&mgr->handles, handle->id);  in hl_encaps_handle_do_release_sob()
      49  idr_init(&mgr->handles);  in hl_encaps_sig_mgr_init()
      59  idp = &mgr->handles;  in hl_encaps_sig_mgr_fini()
      68  idr_destroy(&mgr->handles);  in hl_encaps_sig_mgr_fini()
     154  rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);  in hl_ctx_create()
     181  idr_remove(&ctx_mgr->handles, ctx->handle);  in hl_ctx_create()
     410  idr_init(&ctx_mgr->handles);  in hl_ctx_mgr_init()
     428  idp = &ctx_mgr->handles;  in hl_ctx_mgr_fini()
     433  idr_destroy(&ctx_mgr->handles);  in hl_ctx_mgr_fini()
|
/linux-6.1.9/drivers/gpu/drm/armada/ |
D | armada_fb.c |
     102  (mode->handles[0] != mode->handles[1] ||  in armada_fb_create()
     103  mode->handles[0] != mode->handles[2])) {  in armada_fb_create()
     108  obj = armada_gem_object_lookup(dfile, mode->handles[0]);  in armada_fb_create()
|
/linux-6.1.9/drivers/xen/xenbus/ |
D | xenbus_client.c |
      68  grant_handle_t handles[XENBUS_MAX_RING_GRANTS];  member
     548  grant_handle_t *handles,  in __xenbus_map_ring() argument
     561  handles[i] = INVALID_GRANT_HANDLE;  in __xenbus_map_ring()
     573  handles[i] = info->map[i].handle;  in __xenbus_map_ring()
     580  if (handles[i] != INVALID_GRANT_HANDLE) {  in __xenbus_map_ring()
     583  GNTMAP_host_map, handles[i]);  in __xenbus_map_ring()
     612  static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,  in xenbus_unmap_ring() argument
     624  GNTMAP_host_map, handles[i]);  in xenbus_unmap_ring()
     633  handles[i], unmap[i].status);  in xenbus_unmap_ring()
     676  err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,  in xenbus_map_ring_hvm()
     [all …]
|
/linux-6.1.9/drivers/infiniband/core/ |
D | uverbs_std_types_device.c |
      73  u32 *handles;  in gather_objects_handle() local
      78  handles = uverbs_zalloc(attrs, out_len);  in gather_objects_handle()
      79  if (IS_ERR(handles))  in gather_objects_handle()
      80  return handles;  in gather_objects_handle()
      92  handles[count] = obj_id;  in gather_objects_handle()
      98  return handles;  in gather_objects_handle()
     108  u32 *handles;  in UVERBS_HANDLER() local
     123  handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,  in UVERBS_HANDLER()
     125  if (IS_ERR(handles))  in UVERBS_HANDLER()
     126  return PTR_ERR(handles);  in UVERBS_HANDLER()
     [all …]
|
/linux-6.1.9/drivers/gpu/drm/ |
D | drm_framebuffer.c |
     137  r.handles[0] = or->handle;  in drm_mode_addfb()
     209  if (!r->handles[i]) {  in framebuffer_check()
     267  if (r->handles[i]) {  in framebuffer_check()
     600  for (i = 0; i < ARRAY_SIZE(r->handles); i++) {  in drm_mode_getfb2_ioctl()
     601  r->handles[i] = 0;  in drm_mode_getfb2_ioctl()
     632  r->handles[i] = r->handles[j];  in drm_mode_getfb2_ioctl()
     637  if (r->handles[i])  in drm_mode_getfb2_ioctl()
     642  &r->handles[i]);  in drm_mode_getfb2_ioctl()
     646  &r->handles[i]);  in drm_mode_getfb2_ioctl()
     656  for (i = 0; i < ARRAY_SIZE(r->handles); i++) {  in drm_mode_getfb2_ioctl()
     [all …]
|
D | drm_syncobj.c |
    1188  uint32_t i, *handles;  in drm_syncobj_array_find() local
    1192  handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);  in drm_syncobj_array_find()
    1193  if (handles == NULL)  in drm_syncobj_array_find()
    1196  if (copy_from_user(handles, user_handles,  in drm_syncobj_array_find()
    1209  syncobjs[i] = drm_syncobj_find(file_private, handles[i]);  in drm_syncobj_array_find()
    1216  kfree(handles);  in drm_syncobj_array_find()
    1225  kfree(handles);  in drm_syncobj_array_find()
    1259  u64_to_user_ptr(args->handles),  in drm_syncobj_wait_ioctl()
    1293  u64_to_user_ptr(args->handles),  in drm_syncobj_timeline_wait_ioctl()
    1327  u64_to_user_ptr(args->handles),  in drm_syncobj_reset_ioctl()
    [all …]
|
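drm_syncobj.c (and the v3d and panfrost entries further down) follow the same recipe for a user-supplied handle array: allocate a temporary kernel buffer, copy_from_user() the u32 handles, then translate each one into an in-kernel object. A rough sketch of that step, where resolve_one() is an invented placeholder for drm_syncobj_find() or a GEM object lookup:

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int copy_user_handles(const void __user *user_handles, u32 count,
                                 int (*resolve_one)(u32 handle, void *priv),
                                 void *priv)
    {
        u32 *handles;
        u32 i;
        int ret = 0;

        /* kmalloc_array() guards the count * sizeof() multiplication. */
        handles = kmalloc_array(count, sizeof(*handles), GFP_KERNEL);
        if (!handles)
            return -ENOMEM;

        if (copy_from_user(handles, user_handles, sizeof(*handles) * count)) {
            ret = -EFAULT;
            goto out;
        }

        /* Resolve each handle; stop on the first failure. */
        for (i = 0; i < count; i++) {
            ret = resolve_one(handles[i], priv);
            if (ret)
                break;
        }

    out:
        kfree(handles);         /* the temporary array is not needed afterwards */
        return ret;
    }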
D | drm_prime.c |
     131  p = &prime_fpriv->handles.rb_node;  in drm_prime_add_buf_handle()
     143  rb_insert_color(&member->handle_rb, &prime_fpriv->handles);  in drm_prime_add_buf_handle()
     153  rb = prime_fpriv->handles.rb_node;  in drm_prime_lookup_buf_by_handle()
     200  rb = prime_fpriv->handles.rb_node;  in drm_prime_remove_buf_handle()
     206  rb_erase(&member->handle_rb, &prime_fpriv->handles);  in drm_prime_remove_buf_handle()
     226  prime_fpriv->handles = RB_ROOT;  in drm_prime_init_file_private()
|
/linux-6.1.9/drivers/gpu/drm/radeon/ |
D | radeon_uvd.c |
     222  atomic_set(&rdev->uvd.handles[i], 0);  in radeon_uvd_init()
     259  uint32_t handle = atomic_read(&rdev->uvd.handles[i]);  in radeon_uvd_suspend()
     276  atomic_set(&rdev->uvd.handles[i], 0);  in radeon_uvd_suspend()
     334  uint32_t handle = atomic_read(&rdev->uvd.handles[i]);  in radeon_uvd_free_handles()
     351  atomic_set(&rdev->uvd.handles[i], 0);  in radeon_uvd_free_handles()
     509  if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {  in radeon_uvd_cs_msg()
     514  if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {  in radeon_uvd_cs_msg()
     535  if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {  in radeon_uvd_cs_msg()
     550  atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);  in radeon_uvd_cs_msg()
     850  if (!atomic_read(&rdev->uvd.handles[i]))  in radeon_uvd_count_handles()
|
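radeon_uvd.c and radeon_vce.c (and their amdgpu counterparts further down) track decode/encode session handles in a small array of atomics: atomic_read() finds an existing session, and atomic_cmpxchg(slot, 0, handle) claims a free slot without taking a lock, since only the caller that observed 0 wins the exchange. A hedged sketch of that claim/release idiom (array size, names and error codes are invented for the sketch):

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define MAX_SESSION_HANDLES 10      /* arbitrary limit for this sketch */

    static atomic_t session_handles[MAX_SESSION_HANDLES];

    static int claim_session_handle(u32 handle)
    {
        int i;

        for (i = 0; i < MAX_SESSION_HANDLES; i++) {
            if (atomic_read(&session_handles[i]) == handle)
                return -EINVAL;         /* this handle is already open */

            /* cmpxchg returns the old value: 0 means we took the empty slot. */
            if (!atomic_cmpxchg(&session_handles[i], 0, handle))
                return i;
        }

        return -ENOSPC;                 /* no free session slots */
    }

    static void release_session_handle(u32 handle)
    {
        int i;

        /* Only the slot currently holding this handle is reset to 0. */
        for (i = 0; i < MAX_SESSION_HANDLES; i++)
            atomic_cmpxchg(&session_handles[i], handle, 0);
    }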
D | radeon_vce.c |
     166  atomic_set(&rdev->vce.handles[i], 0);  in radeon_vce_init()
     204  if (atomic_read(&rdev->vce.handles[i]))  in radeon_vce_suspend()
     319  uint32_t handle = atomic_read(&rdev->vce.handles[i]);  in radeon_vce_free_handles()
     331  atomic_set(&rdev->vce.handles[i], 0);  in radeon_vce_free_handles()
     527  if (atomic_read(&p->rdev->vce.handles[i]) == handle) {  in radeon_vce_validate_handle()
     538  if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {  in radeon_vce_validate_handle()
     679  atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);  in radeon_vce_cs_parse()
|
/linux-6.1.9/Documentation/devicetree/bindings/display/ti/ |
D | ti,omap5-dss.txt |
      51  - clocks: handles to fclk and iclk
      69  - clocks: handles to fclk and pll clock
      92  - clocks: handles to fclk and pll clock
|
D | ti,omap4-dss.txt |
      51  - clocks: handles to fclk and iclk
      88  - clocks: handles to fclk and pll clock
     111  - clocks: handles to fclk and pll clock
|
/linux-6.1.9/Documentation/admin-guide/sysctl/ |
D | fs.rst |
     112  handles that the Linux kernel will allocate. When you get lots
     113  of error messages about running out of file handles, you might
     116  Historically,the kernel was able to allocate file handles
     118  file-nr denote the number of allocated file handles, the number
     119  of allocated but unused file handles, and the maximum number of
     120  file handles. Linux 2.6 always reports 0 as the number of free
     121  file handles -- this is not an error, it just means that the
     122  number of allocated file handles exactly matches the number of
     123  used file handles.
     133  This denotes the maximum number of file-handles a process can
     [all …]
|
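The fs.rst excerpt describes the three columns of /proc/sys/fs/file-nr: allocated file handles, allocated-but-unused handles, and the file-max limit. A small user-space reader that splits them out (nothing kernel-specific, just fscanf on the procfs file):

    #include <stdio.h>

    int main(void)
    {
        unsigned long allocated, unused, max;
        FILE *f = fopen("/proc/sys/fs/file-nr", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }

        /* The file holds three whitespace-separated counters on one line. */
        if (fscanf(f, "%lu %lu %lu", &allocated, &unused, &max) != 3) {
            fclose(f);
            fprintf(stderr, "unexpected file-nr format\n");
            return 1;
        }
        fclose(f);

        printf("allocated=%lu unused=%lu file-max=%lu\n", allocated, unused, max);
        return 0;
    }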
/linux-6.1.9/drivers/gpu/drm/v3d/ |
D | v3d_gem.c |
     301  u32 *handles;  in v3d_lookup_bos() local
     323  handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);  in v3d_lookup_bos()
     324  if (!handles) {  in v3d_lookup_bos()
     330  if (copy_from_user(handles,  in v3d_lookup_bos()
     341  handles[i]);  in v3d_lookup_bos()
     344  i, handles[i]);  in v3d_lookup_bos()
     355  kvfree(handles);  in v3d_lookup_bos()
     587  u32 count, u64 handles)  in v3d_get_multisync_post_deps() argument
     602  post_deps = u64_to_user_ptr(handles);  in v3d_get_multisync_post_deps()
|
/linux-6.1.9/drivers/platform/x86/ |
D | sony-laptop.c |
     814  static struct sony_nc_handles *handles;  variable
     822  for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {  in sony_nc_handles_show()
     824  handles->cap[i]);  in sony_nc_handles_show()
     835  handles = kzalloc(sizeof(*handles), GFP_KERNEL);  in sony_nc_handles_setup()
     836  if (!handles)  in sony_nc_handles_setup()
     839  for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {  in sony_nc_handles_setup()
     846  handles->cap[i] = result;  in sony_nc_handles_setup()
     851  sysfs_attr_init(&handles->devattr.attr);  in sony_nc_handles_setup()
     852  handles->devattr.attr.name = "handles";  in sony_nc_handles_setup()
     853  handles->devattr.attr.mode = S_IRUGO;  in sony_nc_handles_setup()
     [all …]
|
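sony-laptop.c builds its "handles" sysfs attribute at runtime rather than with a static DEVICE_ATTR(): sysfs_attr_init() on the embedded attribute, then name, mode and the show callback are filled in by hand. A hedged sketch of that shape (types, array size and the show format are invented here; it assumes the containing state was kzalloc()ed so the unused store callback stays NULL):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>
    #include <linux/types.h>

    /* Hypothetical per-driver state with a dynamically built attribute. */
    struct nc_state {
        u16 cap[16];
        struct device_attribute devattr;
    };

    static ssize_t nc_handles_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
        struct nc_state *state = container_of(attr, struct nc_state, devattr);
        ssize_t len = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(state->cap); i++)
            len += sysfs_emit_at(buf, len, "0x%.4x ", state->cap[i]);

        len += sysfs_emit_at(buf, len, "\n");
        return len;
    }

    static int nc_attr_setup(struct device *dev, struct nc_state *state)
    {
        /* Dynamically allocated attributes need sysfs_attr_init() for lockdep. */
        sysfs_attr_init(&state->devattr.attr);
        state->devattr.attr.name = "handles";
        state->devattr.attr.mode = 0444;        /* read-only, like S_IRUGO */
        state->devattr.show = nc_handles_show;

        return device_create_file(dev, &state->devattr);
    }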
/linux-6.1.9/Documentation/dev-tools/ |
D | kcov.rst |
     229  a list of such handles to the KCOV_REMOTE_ENABLE ioctl in the handles
     231  device to the code sections, that are referenced by those handles.
     241  Internally kcov stores handles as u64 integers. The top byte of a handle
     245  handles as they don't belong to a particular subsystem. The bytes 4-7 are
     252  task_struct. However non common handles allow to collect coverage
     264  __aligned_u64 handles[0];
     316  arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
|
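The kcov.rst lines describe how remote coverage handles are encoded: a u64 whose top byte identifies the subsystem (0 for common handles) and whose lower bytes identify the instance, with the composed value passed in the handles array of the KCOV_REMOTE_ENABLE argument. The documentation's own example composes them roughly like this (user-space C; the subsystem constants are the ones given in kcov.rst and the uapi header):

    #include <stdint.h>
    #include <stdio.h>

    #define KCOV_SUBSYSTEM_COMMON   (0x00ull << 56)
    #define KCOV_SUBSYSTEM_USB      (0x01ull << 56)

    #define KCOV_SUBSYSTEM_MASK     (0xffull << 56)
    #define KCOV_INSTANCE_MASK      (0xffffffffull)

    /* Pack a subsystem id (top byte) and instance id (low 4 bytes) into a handle. */
    static uint64_t kcov_remote_handle(uint64_t subsys, uint64_t inst)
    {
        if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
            return 0;       /* reject out-of-range subsystem or instance ids */
        return subsys | inst;
    }

    int main(void)
    {
        /* e.g. request remote coverage for USB bus number 1 */
        uint64_t handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1);

        printf("handle = 0x%016llx\n", (unsigned long long)handle);
        return 0;
    }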
/linux-6.1.9/drivers/gpu/drm/panfrost/ |
D | panfrost_drv.c |
     198  u32 *handles;  in panfrost_copy_in_sync() local
     207  handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);  in panfrost_copy_in_sync()
     208  if (!handles) {  in panfrost_copy_in_sync()
     214  if (copy_from_user(handles,  in panfrost_copy_in_sync()
     225  ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,  in panfrost_copy_in_sync()
     237  kvfree(handles);  in panfrost_copy_in_sync()
|
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vce.c |
     197  atomic_set(&adev->vce.handles[i], 0);  in amdgpu_vce_sw_init()
     275  if (atomic_read(&adev->vce.handles[i]))  in amdgpu_vce_suspend()
     419  uint32_t handle = atomic_read(&adev->vce.handles[i]);  in amdgpu_vce_free_handles()
     429  atomic_set(&adev->vce.handles[i], 0);  in amdgpu_vce_free_handles()
     706  if (atomic_read(&p->adev->vce.handles[i]) == handle) {  in amdgpu_vce_validate_handle()
     717  if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {  in amdgpu_vce_validate_handle()
     962  atomic_set(&p->adev->vce.handles[i], 0);  in amdgpu_vce_ring_parse_cs()
    1054  atomic_set(&p->adev->vce.handles[i], 0);  in amdgpu_vce_ring_parse_cs_vm()
|
D | amdgpu_uvd.c |
     343  atomic_set(&adev->uvd.handles[i], 0);  in amdgpu_uvd_sw_init()
     438  if (atomic_read(&adev->uvd.handles[i]))  in amdgpu_uvd_suspend()
     526  uint32_t handle = atomic_read(&adev->uvd.handles[i]);  in amdgpu_uvd_free_handles()
     542  atomic_set(&adev->uvd.handles[i], 0);  in amdgpu_uvd_free_handles()
     850  if (atomic_read(&adev->uvd.handles[i]) == handle) {  in amdgpu_uvd_cs_msg()
     856  if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {  in amdgpu_uvd_cs_msg()
     874  if (atomic_read(&adev->uvd.handles[i]) == handle) {  in amdgpu_uvd_cs_msg()
     889  atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);  in amdgpu_uvd_cs_msg()
    1386  if (atomic_read(&adev->uvd.handles[i]))  in amdgpu_uvd_used_handles()
|
/linux-6.1.9/tools/testing/selftests/tpm2/ |
D | tpm2.py |
     690  handles = []
     705  handles.append(handle)
     708  return handles, more_data
     711  handles = []
     716  handles += next_handles
     719  return handles
|
D | tpm2_tests.py |
     276  handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT)
     278  self.assertEqual(len(handles), 2)
     280  log.debug("%08x" % (handles[0]))
     281  log.debug("%08x" % (handles[1]))
|
/linux-6.1.9/Documentation/core-api/ |
D | entry.rst |
      93  syscall_exit_to_user_mode() handles all work which needs to be done before
      95  that it invokes exit_to_user_mode() which again handles the state
     168  return true, handles NOHZ tick state and interrupt time accounting. This
     172  irq_exit_rcu() handles interrupt time accounting, undoes the preemption
     173  count update and eventually handles soft interrupts and NOHZ tick state.
     183  before it handles soft interrupts, whose handlers must run in BH context rather
|