/linux-6.6.21/arch/mips/ar7/
  prom.c
    149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
    151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
    156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
    158  value = chunks[i].data; in parse_psp_env()
    159  if (chunks[i].num) { in parse_psp_env()
    160  name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
    167  i += chunks[i].len; in parse_psp_env()
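The prom.c hits outline a packed-table walk: each PSP environment chunk carries a variable id (num), a length counted in table entries (len) and its value, and the parser advances by chunks[i].len until it meets the 0xff terminator or would run off the end. A minimal user-space sketch of that walk; the entry layout and field widths below are assumptions for illustration, not the kernel's struct psp_env_chunk:

#include <stdio.h>

/* Hypothetical fixed-size entry; the real struct psp_env_chunk differs. */
struct env_chunk {
	unsigned char num;    /* variable id, 0xff marks the end of the table */
	unsigned char len;    /* length of this variable, in table entries */
	char data[16];        /* start of the NUL-terminated value */
};

/* Mirror of the parse_psp_env() loop: stop on the 0xff terminator or on a
 * length that would run past the n entries of the table, otherwise report
 * the variable and advance by the chunk's own length. */
static void parse_env(const struct env_chunk *chunks, unsigned int n)
{
	unsigned int i = 0;

	while (i < n) {
		if (chunks[i].num == 0xff || i + chunks[i].len > n)
			break;
		if (chunks[i].num)
			printf("var %u = %s\n", (unsigned)chunks[i].num, chunks[i].data);
		i += chunks[i].len;
	}
}

int main(void)
{
	struct env_chunk tbl[3] = {
		{ .num = 1, .len = 1, .data = "foo" },
		{ .num = 2, .len = 1, .data = "bar" },
		{ .num = 0xff },
	};

	parse_env(tbl, 3);
	return 0;
}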
/linux-6.6.21/drivers/gpu/drm/radeon/
  radeon_cs.c
    296  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
    303  p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
    304  if (p->chunks == NULL) { in radeon_cs_parser_init()
    317  p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
    319  p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
    322  p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
    324  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
    328  p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
    330  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
    334  p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
    [all …]
/linux-6.6.21/drivers/comedi/drivers/ni_routing/tools/
  convert_csv_to_c.py
    228  chunks = [ self.output_file_top,
    244  chunks.append('\t&{},'.format(dev_table_name))
    273  chunks.append('\tNULL,') # terminate list
    274  chunks.append('};')
    275  return '\n'.join(chunks)
    416  chunks = [ self.output_file_top,
    432  chunks.append('\t&{},'.format(fam_table_name))
    462  chunks.append('\tNULL,') # terminate list
    463  chunks.append('};')
    464  return '\n'.join(chunks)
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/nfpcore/
  nfp_nsp.c
    505  } *chunks; in nfp_nsp_command_buf_dma_sg() local
    517  chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
    518  if (!chunks) in nfp_nsp_command_buf_dma_sg()
    526  chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
    528  if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
    531  chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
    536  memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
    538  memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
    540  off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
    548  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
    [all …]
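The nfp_nsp.c hits show the NSP command buffer being split into a scatter list of fixed-size chunks: each chunk gets its own allocation, the part backed by the caller's input is copied in, and the remainder is zero-filled before DMA mapping. A rough user-space sketch of that split, with malloc()/free() standing in for the kernel allocations and no DMA mapping:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One piece of the split-up command buffer: a private copy and its length. */
struct cmd_chunk {
	void  *buf;
	size_t len;
};

/* Split a max_size transfer into fixed-size chunks, copy whatever input data
 * is available into each chunk and zero the rest, as the excerpt does before
 * DMA-mapping each piece. */
static struct cmd_chunk *chunk_buffer(const char *in, size_t in_len,
				      size_t max_size, size_t chunk_size,
				      size_t *nseg_out)
{
	size_t nseg = (max_size + chunk_size - 1) / chunk_size;
	struct cmd_chunk *chunks = calloc(nseg, sizeof(*chunks));
	size_t off = 0, i;

	if (!chunks)
		return NULL;

	for (i = 0; i < nseg; i++) {
		size_t len = max_size - off < chunk_size ? max_size - off : chunk_size;
		size_t copy = off < in_len ? (in_len - off < len ? in_len - off : len) : 0;

		chunks[i].buf = malloc(len);
		if (!chunks[i].buf)
			goto err;
		chunks[i].len = len;

		if (copy)
			memcpy(chunks[i].buf, in + off, copy);        /* part backed by input */
		memset((char *)chunks[i].buf + copy, 0, len - copy);  /* zero padding */
		off += len;
	}

	*nseg_out = nseg;
	return chunks;

err:
	while (i--)
		free(chunks[i].buf);
	free(chunks);
	return NULL;
}

int main(void)
{
	size_t nseg, i;
	struct cmd_chunk *c = chunk_buffer("hello", 5, 4096, 1024, &nseg);

	if (c) {
		printf("built %zu chunks of the 4096-byte buffer\n", nseg);  /* 4 */
		for (i = 0; i < nseg; i++)
			free(c[i].buf);
		free(c);
	}
	return 0;
}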
/linux-6.6.21/scripts/gdb/linux/
  timerlist.py
    163  chunks = []
    169  chunks.append(buf[start:end])
    171  chunks.append(',')
    175  chunks[0] = chunks[0][0] # Cut off the first 0
    177  return "".join(str(chunks))
/linux-6.6.21/net/xdp/
  xdp_umem.c
    157  u64 chunks, npgs; in xdp_umem_reg() local
    192  chunks = div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
    193  if (!chunks || chunks > U32_MAX) in xdp_umem_reg()
    205  umem->chunks = chunks; in xdp_umem_reg()
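The xdp_umem_reg() hits boil down to a divisibility check: the registered area must hold at least one chunk and the chunk count must fit the 32-bit fields used by later consumers. A small sketch of that arithmetic; the strict no-remainder rule here is a simplification (the real code also accepts an unaligned-chunk mode):

#include <stdint.h>
#include <stdio.h>

/* Validate a UMEM-style size/chunk_size pair and return the chunk count. */
static int umem_chunk_count(uint64_t size, uint64_t chunk_size, uint32_t *out)
{
	uint64_t rem = size % chunk_size;
	uint64_t chunks = size / chunk_size;

	if (!chunks || chunks > UINT32_MAX)
		return -1;            /* empty area or too many chunks */
	if (rem)
		return -1;            /* size must be a whole number of chunks here */

	*out = (uint32_t)chunks;
	return 0;
}

int main(void)
{
	uint32_t chunks;

	if (!umem_chunk_count(16 * 4096, 4096, &chunks))
		printf("UMEM holds %u chunks\n", (unsigned)chunks);   /* 16 */
	return 0;
}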
/linux-6.6.21/drivers/infiniband/hw/usnic/
  usnic_vnic.c
    44   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
    117  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
    118  chunk = &vnic->chunks[i]; in usnic_vnic_dump()
    222  return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
    228  return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
    254  src = &vnic->chunks[type]; in usnic_vnic_get_resources()
    286  vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
    382  &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
    391  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
    427  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/linux-6.6.21/mm/
  zbud.c
    249  int chunks, i, freechunks; in zbud_alloc() local
    258  chunks = size_to_chunks(size); in zbud_alloc()
    262  for_each_unbuddied_list(i, chunks) { in zbud_alloc()
    287  zhdr->first_chunks = chunks; in zbud_alloc()
    289  zhdr->last_chunks = chunks; in zbud_alloc()
  z3fold.c
    556  static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) in get_free_buddy() argument
    562  chunks <= zhdr->start_middle - ZHDR_CHUNKS) in get_free_buddy()
    629  short chunks = size_to_chunks(sz); in compact_single_buddy() local
    639  new_bud = get_free_buddy(new_zhdr, chunks); in compact_single_buddy()
    643  new_zhdr->first_chunks = chunks; in compact_single_buddy()
    647  new_zhdr->middle_chunks = chunks; in compact_single_buddy()
    653  new_zhdr->last_chunks = chunks; in compact_single_buddy()
    791  int chunks = size_to_chunks(size), i; in __z3fold_alloc() local
    797  for_each_unbuddied_list(i, chunks) { in __z3fold_alloc()
    853  l = &unbuddied[chunks]; in __z3fold_alloc()
    [all …]
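Both the zbud and z3fold hits revolve around the same two ideas: size_to_chunks() rounds a request up to whole CHUNK_SIZE units, and pages with free space sit on "unbuddied" lists indexed by how many chunks are still free, so an allocation can go straight to a list big enough to hold it. A toy illustration with made-up CHUNK_SIZE/NCHUNKS values, not the kernel's:

#include <stdio.h>

#define CHUNK_SIZE 64                    /* illustrative value only */
#define NCHUNKS    (4096 / CHUNK_SIZE)   /* chunks per page */

/* Round an allocation size up to whole chunks, as size_to_chunks() does. */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
}

int main(void)
{
	size_t size = 1000;
	int chunks = size_to_chunks(size);

	/* zbud_alloc()/__z3fold_alloc() start their for_each_unbuddied_list()
	 * scan at index 'chunks', the smallest free slot that could fit it. */
	printf("%u bytes -> %d chunks, scan unbuddied lists %d..%d\n",
	       (unsigned)size, chunks, chunks, NCHUNKS - 1);
	return 0;
}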
/linux-6.6.21/lib/
  genalloc.c
    160  INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
    203  list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_owner()
    223  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
    249  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
    297  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo_owner()
    503  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free_owner()
    538  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
    561  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in gen_pool_has_addr()
    586  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
    605  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
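The genalloc hits show the shape of a gen_pool: it is essentially a list of chunks, and calls such as gen_pool_avail(), gen_pool_size() and gen_pool_virt_to_phys() just walk that list under RCU and accumulate or search per-chunk data. A plain-C sketch of that walk, with an ordinary singly linked list standing in for the RCU-protected list_head:

#include <stdio.h>

struct pool_chunk {
	size_t avail;               /* free bytes in this chunk */
	struct pool_chunk *next;
};

struct pool {
	struct pool_chunk *chunks;  /* head of the chunk list */
};

/* Sum the free space over every chunk, as gen_pool_avail() does over its
 * RCU-protected chunk list. */
static size_t pool_avail(const struct pool *pool)
{
	size_t avail = 0;
	const struct pool_chunk *c;

	for (c = pool->chunks; c; c = c->next)
		avail += c->avail;
	return avail;
}

int main(void)
{
	struct pool_chunk c2 = { .avail = 512, .next = NULL };
	struct pool_chunk c1 = { .avail = 4096, .next = &c2 };
	struct pool pool = { .chunks = &c1 };

	printf("available: %zu bytes\n", pool_avail(&pool));   /* 4608 */
	return 0;
}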
/linux-6.6.21/tools/testing/selftests/drivers/net/mlxsw/spectrum/
  devlink_lib_spectrum.sh
    90   devlink_resource_size_set 32000 kvd linear chunks
    99   devlink_resource_size_set 32000 kvd linear chunks
    108  devlink_resource_size_set 49152 kvd linear chunks
/linux-6.6.21/arch/x86/kernel/cpu/resctrl/
  monitor.c
    226  u64 shift = 64 - width, chunks; in mbm_overflow_count() local
    228  chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
    229  return chunks >> shift; in mbm_overflow_count()
    238  u64 msr_val, chunks; in resctrl_arch_rmid_read() local
    250  am->chunks += mbm_overflow_count(am->prev_msr, msr_val, in resctrl_arch_rmid_read()
    252  chunks = get_corrected_mbm_count(rmid, am->chunks); in resctrl_arch_rmid_read()
    255  chunks = msr_val; in resctrl_arch_rmid_read()
    258  *val = chunks * hw_res->mon_scale; in resctrl_arch_rmid_read()
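mbm_overflow_count() handles the fact that the hardware MBM counter is only 'width' bits wide: shifting both readings into the top bits of a u64 makes the subtraction wrap exactly as the hardware does, and shifting back yields the true delta; resctrl_arch_rmid_read() then accumulates those deltas and scales them by mon_scale. A standalone sketch of the wrap-safe delta:

#include <stdint.h>
#include <stdio.h>

/* Wrap-aware delta of a counter that is only 'width' bits wide. */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
	unsigned int shift = 64 - width;
	uint64_t chunks = (cur << shift) - (prev << shift);

	return chunks >> shift;
}

int main(void)
{
	/* 24-bit counter that wrapped from near its maximum back to a small value. */
	uint64_t prev = 0xFFFFF0, cur = 0x000010;

	printf("delta = %llu\n",
	       (unsigned long long)overflow_count(prev, cur, 24));   /* 32 */
	return 0;
}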
/linux-6.6.21/net/sctp/
  chunk.c
    43   INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
    65   list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
    81   list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
    280  list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
    289  list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
  auth.c
    186  struct sctp_chunks_param *chunks, in sctp_auth_make_key_vector() argument
    197  if (chunks) in sctp_auth_make_key_vector()
    198  chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
    209  if (chunks) { in sctp_auth_make_key_vector()
    210  memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
    656  switch (param->chunks[i]) { in __sctp_auth_cid()
    664  if (param->chunks[i] == chunk) in __sctp_auth_cid()
    772  p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
/linux-6.6.21/kernel/
  audit_tree.c
    17   struct list_head chunks; member
    101  INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
    435  list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
    507  list_add(&p->list, &tree->chunks); in tag_chunk()
    572  while (!list_empty(&victim->chunks)) { in prune_tree_chunks()
    577  p = list_first_entry(&victim->chunks, struct audit_node, list); in prune_tree_chunks()
    618  for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
    623  list_add(p, &tree->chunks); in trim_marked()
    705  list_for_each_entry(node, &tree->chunks, list) { in audit_trim_trees()
    845  list_for_each_entry(node, &tree->chunks, list) in audit_add_tree_rule()
    [all …]
/linux-6.6.21/drivers/gpu/drm/amd/amdgpu/
  amdgpu_cs.c
    194  chunk_array_user = u64_to_user_ptr(cs->in.chunks); in amdgpu_cs_pass1()
    202  p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_pass1()
    204  if (!p->chunks) { in amdgpu_cs_pass1()
    221  p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_pass1()
    222  p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_pass1()
    224  size = p->chunks[i].length_dw; in amdgpu_cs_pass1()
    227  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), in amdgpu_cs_pass1()
    229  if (p->chunks[i].kdata == NULL) { in amdgpu_cs_pass1()
    235  if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_pass1()
    242  switch (p->chunks[i].chunk_id) { in amdgpu_cs_pass1()
    [all …]
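The amdgpu_cs.c hits (like the radeon_cs.c entry earlier in this listing) show a two-level copy: userspace hands in an array of chunk descriptors, each naming a chunk_id and length_dw 32-bit words of payload behind a user pointer; the kernel copies the descriptor array, then copies each payload into its own kernel buffer before dispatching on chunk_id. A hedged user-space sketch with memcpy() standing in for copy_from_user() and an illustrative struct layout, not the real ABI:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct user_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	const uint32_t *data;     /* in the real ABI this is a u64 user pointer */
};

struct kernel_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
};

/* Pull every descriptor and its payload into private memory. */
static struct kernel_chunk *copy_chunks(const struct user_chunk *uchunks,
					unsigned int nchunks)
{
	struct kernel_chunk *chunks = calloc(nchunks, sizeof(*chunks));
	unsigned int i;

	if (!chunks)
		return NULL;

	for (i = 0; i < nchunks; i++) {
		size_t size = uchunks[i].length_dw * sizeof(uint32_t);

		chunks[i].chunk_id = uchunks[i].chunk_id;
		chunks[i].length_dw = uchunks[i].length_dw;
		chunks[i].kdata = malloc(size);
		if (!chunks[i].kdata)
			goto err;
		/* copy_from_user() in the driver; a plain memcpy() here */
		memcpy(chunks[i].kdata, uchunks[i].data, size);
		/* the driver then dispatches on chunk_id (IB, relocs, flags, ...) */
	}
	return chunks;

err:
	while (i--)
		free(chunks[i].kdata);
	free(chunks);
	return NULL;
}

int main(void)
{
	uint32_t payload[2] = { 0xdeadbeef, 0xcafebabe };
	struct user_chunk uc = { .chunk_id = 1, .length_dw = 2, .data = payload };
	struct kernel_chunk *kc = copy_chunks(&uc, 1);

	if (kc) {
		printf("chunk 0: id %u, first dword 0x%x\n",
		       (unsigned)kc[0].chunk_id, (unsigned)kc[0].kdata[0]);
		free(kc[0].kdata);
		free(kc);
	}
	return 0;
}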
/linux-6.6.21/drivers/infiniband/ulp/rtrs/
  README
    28   session. A session is associated with a set of memory chunks reserved on the
    36   chunks reserved for him on the server side. Their number, size and addresses
    45   which of the memory chunks has been accessed and at which offset the message
    80   the server (number of memory chunks which are going to be allocated for that
    122  1. When processing a write request client selects one of the memory chunks
    139  1. When processing a write request client selects one of the memory chunks
    144  using the IMM field, Server invalidate rkey associated to the memory chunks
    162  1. When processing a read request client selects one of the memory chunks
    181  1. When processing a read request client selects one of the memory chunks
    186  Server invalidate rkey associated to the memory chunks first, when it finishes,
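The README lines describe a client picking one of the memory chunks reserved for it on the server and telling the server, via the RDMA immediate data, which chunk was used and at what offset the message starts; the rtrs-srv.c hits below then recover it with page_address(srv->chunks[msg_id]) + off. A toy encoding of that idea; the 16/16 bit split is purely illustrative and is not the actual RTRS immediate format:

#include <stdint.h>
#include <stdio.h>

/* Pack a chunk index and an offset into one 32-bit immediate (illustrative). */
static uint32_t imm_pack(uint32_t msg_id, uint32_t off)
{
	return (msg_id << 16) | (off & 0xffff);
}

static void imm_unpack(uint32_t imm, uint32_t *msg_id, uint32_t *off)
{
	*msg_id = imm >> 16;
	*off = imm & 0xffff;
}

int main(void)
{
	uint32_t msg_id, off;

	imm_unpack(imm_pack(42, 0x1f0), &msg_id, &off);
	printf("chunk %u, offset 0x%x\n", (unsigned)msg_id, (unsigned)off);
	return 0;
}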
  rtrs-srv.c
    601   int nr, nr_sgt, chunks; in map_cont_bufs() local
    604   chunks = chunks_per_mr * srv_path->mrs_num; in map_cont_bufs()
    607   srv->queue_depth - chunks); in map_cont_bufs()
    614   sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
    649   srv_path->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
    1035  data = page_address(srv->chunks[buf_id]); in process_read()
    1088  data = page_address(srv->chunks[buf_id]); in process_write()
    1163  data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
    1265  data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
    1365  __free_pages(srv->chunks[i], get_order(max_chunk_size)); in free_srv()
    [all …]
/linux-6.6.21/drivers/infiniband/hw/efa/
  efa_verbs.c
    110   struct pbl_chunk *chunks; member
    1285  chunk_list->chunks = kcalloc(chunk_list_size, in pbl_chunk_list_create()
    1286  sizeof(*chunk_list->chunks), in pbl_chunk_list_create()
    1288  if (!chunk_list->chunks) in pbl_chunk_list_create()
    1297  chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL); in pbl_chunk_list_create()
    1298  if (!chunk_list->chunks[i].buf) in pbl_chunk_list_create()
    1301  chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE; in pbl_chunk_list_create()
    1303  chunk_list->chunks[chunk_list_size - 1].length = in pbl_chunk_list_create()
    1310  cur_chunk_buf = chunk_list->chunks[0].buf; in pbl_chunk_list_create()
    1318  cur_chunk_buf = chunk_list->chunks[chunk_idx].buf; in pbl_chunk_list_create()
    [all …]
/linux-6.6.21/drivers/net/wireless/intel/iwlwifi/pcie/
  ctxt-info-gen3.c
    296  len0 = pnvm_data->chunks[0].len; in iwl_pcie_load_payloads_continuously()
    297  len1 = pnvm_data->chunks[1].len; in iwl_pcie_load_payloads_continuously()
    312  memcpy(dram->block, pnvm_data->chunks[0].data, len0); in iwl_pcie_load_payloads_continuously()
    313  memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1); in iwl_pcie_load_payloads_continuously()
    346  len = pnvm_data->chunks[i].len; in iwl_pcie_load_payloads_segments()
    347  data = pnvm_data->chunks[i].data; in iwl_pcie_load_payloads_segments()
/linux-6.6.21/drivers/md/
  md-bitmap.c
    810   unsigned long chunks, int with_super, in md_bitmap_storage_alloc() argument
    817   bytes = DIV_ROUND_UP(chunks, 8); in md_bitmap_storage_alloc()
    1109  unsigned long chunks = bitmap->counts.chunks; in md_bitmap_init_from_disk() local
    1121  for (i = 0; i < chunks ; i++) { in md_bitmap_init_from_disk()
    1191  for (i = 0; i < chunks; i++) { in md_bitmap_init_from_disk()
    1217  bit_cnt, chunks); in md_bitmap_init_from_disk()
    1354  for (j = 0; j < counts->chunks; j++) { in md_bitmap_daemon_work()
    2066  for (j = 0; j < counts->chunks; j++) { in md_bitmap_copy_from_slot()
    2139  unsigned long chunks; in md_bitmap_resize() local
    2163  bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); in md_bitmap_resize()
    [all …]
/linux-6.6.21/tools/testing/selftests/bpf/
  generate_udp_fragments.py
    46  chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)]
    47  chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks]
/linux-6.6.21/drivers/net/wireless/ti/wlcore/
  boot.c
    240  u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
    245  chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
    248  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
    250  while (chunks--) { in wlcore_boot_upload_firmware()
    261  chunks, addr, len); in wlcore_boot_upload_firmware()
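wlcore_boot_upload_firmware() treats the firmware image as a big-endian chunk count followed by per-chunk records, counting chunks down as it uploads them. A sketch of that parse; the exact per-chunk record layout used below (address, length, then payload back to back) is an assumption made so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 32-bit word, like be32_to_cpup() does for the blob. */
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void upload_firmware(const uint8_t *fw, size_t fw_len)
{
	size_t off = 4;
	uint32_t chunks = be32(fw);          /* chunk count is the first word */

	printf("firmware chunks to be uploaded: %u\n", (unsigned)chunks);

	while (chunks--) {
		uint32_t addr, len;

		if (off + 8 > fw_len)
			break;               /* truncated image */
		addr = be32(fw + off);
		len = be32(fw + off + 4);
		printf("chunk: addr 0x%x len %u (%u left)\n",
		       (unsigned)addr, (unsigned)len, (unsigned)chunks);
		/* the driver would now write fw[off + 8 .. off + 8 + len) to addr */
		off += 8 + len;
	}
}

int main(void)
{
	/* one chunk: count = 1, addr = 0x1000, len = 4, payload "ABCD" */
	const uint8_t fw[] = { 0, 0, 0, 1,  0, 0, 0x10, 0,  0, 0, 0, 4,  'A', 'B', 'C', 'D' };

	upload_firmware(fw, sizeof(fw));
	return 0;
}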
/linux-6.6.21/drivers/gpu/drm/sprd/
  sprd_dsi.c
    460  u32 chunks = 0; in sprd_dsi_dpi_video() local
    539  chunks = vm->hactive / video_size; in sprd_dsi_dpi_video()
    542  if (total_bytes >= (bytes_per_chunk * chunks)) { in sprd_dsi_dpi_video()
    544  bytes_per_chunk * chunks; in sprd_dsi_dpi_video()
    550  if (bytes_left > (pkt_header * chunks)) { in sprd_dsi_dpi_video()
    552  pkt_header * chunks) / chunks; in sprd_dsi_dpi_video()
    560  chunks = 1; in sprd_dsi_dpi_video()
    571  dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks); in sprd_dsi_dpi_video()
/linux-6.6.21/drivers/virt/vboxguest/
  vboxguest_core.c
    356   u32 i, chunks; in vbg_balloon_work() local
    384   chunks = req->balloon_chunks; in vbg_balloon_work()
    385   if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
    387   __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
    391   if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
    393   for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
    398   gdev->mem_balloon.chunks++; in vbg_balloon_work()
    402   for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
    407   gdev->mem_balloon.chunks--; in vbg_balloon_work()
    1641  balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
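The vbg_balloon_work() hits show the balloon being resized toward a host-requested chunk count: inflate one chunk at a time while below the target, deflate while above it, updating the current count as each step succeeds. A sketch of that loop, with inflate()/deflate() as stand-ins for the real chunk allocation and release:

#include <stdbool.h>
#include <stdio.h>

static bool inflate(unsigned int i)  { printf("inflate chunk %u\n", i); return true; }
static bool deflate(unsigned int i)  { printf("deflate chunk %u\n", i); return true; }

/* Move the current balloon size toward the requested target, one chunk at a
 * time, keeping whatever was achieved if a step fails. */
static void balloon_resize(unsigned int *cur, unsigned int target, unsigned int max)
{
	unsigned int i;

	if (target > max)
		return;                       /* host asked for more than allowed */

	if (target > *cur) {
		for (i = *cur; i < target; i++) {
			if (!inflate(i))
				return;
			(*cur)++;
		}
	} else {
		for (i = *cur; i-- > target;) {
			if (!deflate(i))
				return;
			(*cur)--;
		}
	}
}

int main(void)
{
	unsigned int chunks = 2;

	balloon_resize(&chunks, 5, 8);        /* inflates chunks 2..4 */
	balloon_resize(&chunks, 1, 8);        /* deflates chunks 4..1 */
	printf("current chunks: %u\n", chunks);
	return 0;
}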