
Searched refs:chunks (Results 1 – 25 of 144) sorted by relevance


/linux-5.19.10/arch/mips/ar7/
prom.c 149 struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
151 memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
156 if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
158 value = chunks[i].data; in parse_psp_env()
159 if (chunks[i].num) { in parse_psp_env()
160 name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
167 i += chunks[i].len; in parse_psp_env()
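
The records walked here form a small (id, length, data) table. A minimal userspace sketch of that walk, with the struct layout inferred from this excerpt rather than taken from prom.c:

#include <stdint.h>
#include <stddef.h>

/* Layout inferred from the excerpt above, not copied from prom.c: the
 * environment area is an array of 2-byte headers; a record's value bytes
 * sit in the slots that follow its header, and len counts slots including
 * the header. num == 0xff or a length running past the end stops it. */
struct psp_env_chunk {
	uint8_t num;	/* variable id, 0xff terminates the table */
	uint8_t len;	/* record length in 2-byte slots */
};

static void walk_env(const struct psp_env_chunk *chunks, size_t n)
{
	size_t i = 0;

	while (i < n) {
		if (chunks[i].num == 0xff || chunks[i].len == 0 ||
		    i + chunks[i].len > n)
			break;
		/* value bytes start right after the header */
		const char *value = (const char *)&chunks[i + 1];

		(void)value;	/* a real parser maps num to a name */
		i += chunks[i].len;
	}
}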
/linux-5.19.10/drivers/gpu/drm/radeon/
radeon_cs.c 295 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
302 p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
303 if (p->chunks == NULL) { in radeon_cs_parser_init()
316 p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
318 p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
321 p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
323 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
327 p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
329 if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
333 p->chunk_flags = &p->chunks[i]; in radeon_cs_parser_init()
[all …]
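
For orientation, a rough userspace sketch of the shape of this init path: the caller's chunk descriptors are copied into a freshly allocated parallel array, special chunks are remembered by pointer, and an empty IB chunk is rejected. The chunk-id constant and types below are hypothetical stand-ins, not radeon's:

#include <stdint.h>
#include <stdlib.h>

#define CHUNK_ID_IB 2	/* hypothetical stand-in for the real chunk id */

struct cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;	/* payload length in 32-bit dwords */
};

/* Copy the caller's chunk descriptors and remember the IB chunk,
 * rejecting an empty one, as radeon_cs_parser_init() does above. */
static struct cs_chunk *parse_chunks(const struct cs_chunk *user,
				     unsigned int n, struct cs_chunk **ib)
{
	struct cs_chunk *chunks = calloc(n, sizeof(*chunks));

	if (!chunks)
		return NULL;
	for (unsigned int i = 0; i < n; i++) {
		chunks[i] = user[i];
		if (chunks[i].chunk_id == CHUNK_ID_IB) {
			if (chunks[i].length_dw == 0)
				goto err;	/* empty IB is invalid */
			*ib = &chunks[i];
		}
	}
	return chunks;
err:
	free(chunks);
	return NULL;
}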
/linux-5.19.10/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp.c 504 } *chunks; in nfp_nsp_command_buf_dma_sg() local
516 chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
517 if (!chunks) in nfp_nsp_command_buf_dma_sg()
525 chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
527 if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
530 chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
535 memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
537 memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
539 off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
547 addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
[all …]
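
The copy-and-pad step is generic enough to sketch in userspace; names and sizing here are illustrative, and only the zero-fill behaviour is taken from the excerpt:

#include <stdlib.h>
#include <string.h>

/* Carve an input buffer into nseg fixed-size chunks, zero-padding
 * whatever the input does not cover, in the spirit of
 * nfp_nsp_command_buf_dma_sg() above. */
static int fill_chunks(char **chunk, size_t nseg, size_t chunk_size,
		       const char *in, size_t in_len)
{
	size_t off = 0;

	for (size_t i = 0; i < nseg; i++) {
		size_t coff = 0;

		chunk[i] = malloc(chunk_size);
		if (!chunk[i])
			return -1;	/* caller frees chunk[0..i) */
		if (off < in_len) {
			coff = in_len - off;
			if (coff > chunk_size)
				coff = chunk_size;
			memcpy(chunk[i], in + off, coff);
		}
		memset(chunk[i] + coff, 0, chunk_size - coff);
		off += chunk_size;
	}
	return 0;
}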
/linux-5.19.10/drivers/comedi/drivers/ni_routing/tools/
convert_csv_to_c.py 228 chunks = [ self.output_file_top,
244 chunks.append('\t&{},'.format(dev_table_name))
273 chunks.append('\tNULL,') # terminate list
274 chunks.append('};')
275 return '\n'.join(chunks)
416 chunks = [ self.output_file_top,
432 chunks.append('\t&{},'.format(fam_table_name))
462 chunks.append('\tNULL,') # terminate list
463 chunks.append('};')
464 return '\n'.join(chunks)
/linux-5.19.10/scripts/gdb/linux/
timerlist.py 162 chunks = []
168 chunks.append(buf[start:end])
170 chunks.append(',')
174 chunks[0] = chunks[0][0] # Cut off the first 0
176 return "".join(chunks)
/linux-5.19.10/drivers/infiniband/hw/usnic/
usnic_vnic.c 44 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
117 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
118 chunk = &vnic->chunks[i]; in usnic_vnic_dump()
222 return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
228 return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
254 src = &vnic->chunks[type]; in usnic_vnic_get_resources()
286 vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
382 &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
391 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
427 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/linux-5.19.10/arch/x86/kernel/cpu/resctrl/
monitor.c 282 u64 shift = 64 - width, chunks; in mbm_overflow_count() local
284 chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
285 return chunks >> shift; in mbm_overflow_count()
292 u64 chunks, tval; in __mon_event_count() local
322 chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width); in __mon_event_count()
323 m->chunks += chunks; in __mon_event_count()
326 rr->val += get_corrected_mbm_count(rmid, m->chunks); in __mon_event_count()
339 u64 tval, cur_bw, chunks; in mbm_bw_count() local
345 chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width); in mbm_bw_count()
346 cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20; in mbm_bw_count()
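
The shift trick in mbm_overflow_count() is worth spelling out: for a counter only width bits wide, shifting both samples left by 64 - width makes the subtraction wrap at the counter width, so a single overflow between reads is still counted correctly. A standalone sketch, assuming 0 < width <= 64:

#include <stdint.h>

/* Wrap-safe delta of a width-bit hardware counter, after
 * mbm_overflow_count() above; assumes 0 < width <= 64. */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
	unsigned int shift = 64 - width;

	return ((cur << shift) - (prev << shift)) >> shift;
}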
/linux-5.19.10/net/xdp/
xdp_umem.c 158 unsigned int chunks, chunks_rem; in xdp_umem_reg() local
193 chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
194 if (chunks == 0) in xdp_umem_reg()
206 umem->chunks = chunks; in xdp_umem_reg()
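
xdp_umem_reg() derives the chunk count by dividing the UMEM size and, per the chunks == 0 check, rejects a region too small for even one chunk. A minimal sketch of that validation, with illustrative names:

#include <stdint.h>
#include <stdbool.h>

/* Split a UMEM of `size` bytes into chunks; fail if not even one whole
 * chunk fits. The remainder is handed back so the caller can apply its
 * own alignment policy, as xdp_umem_reg() does above. */
static bool umem_chunk_count(uint64_t size, uint32_t chunk_size,
			     uint32_t *chunks, uint32_t *rem)
{
	if (chunk_size == 0)
		return false;
	*chunks = (uint32_t)(size / chunk_size);
	*rem = (uint32_t)(size % chunk_size);
	return *chunks != 0;
}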
/linux-5.19.10/lib/
genalloc.c 160 INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
203 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_owner()
223 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
249 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
297 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo_owner()
503 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free_owner()
538 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
561 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in gen_pool_has_addr()
586 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
605 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
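
Nearly every genalloc operation above is a walk over pool->chunks under RCU. A plain-linked-list userspace sketch of the gen_pool_avail()-style walk, with the RCU machinery elided and fields illustrative:

#include <stddef.h>

/* A gen_pool keeps its backing memory as a list of chunks; queries like
 * gen_pool_avail() just sum over that list. The kernel iterates with
 * list_for_each_entry_rcu(); a singly linked list stands in here. */
struct pool_chunk {
	struct pool_chunk *next;
	size_t avail;	/* free bytes accounted to this chunk */
};

static size_t pool_avail(const struct pool_chunk *chunks)
{
	size_t total = 0;

	for (const struct pool_chunk *c = chunks; c; c = c->next)
		total += c->avail;
	return total;
}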
/linux-5.19.10/tools/testing/selftests/drivers/net/mlxsw/spectrum/
devlink_lib_spectrum.sh 90 devlink_resource_size_set 32000 kvd linear chunks
99 devlink_resource_size_set 32000 kvd linear chunks
108 devlink_resource_size_set 49152 kvd linear chunks
/linux-5.19.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c 140 chunk_array_user = u64_to_user_ptr(cs->in.chunks); in amdgpu_cs_parser_init()
148 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_parser_init()
150 if (!p->chunks) { in amdgpu_cs_parser_init()
167 p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_parser_init()
168 p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_parser_init()
170 size = p->chunks[i].length_dw; in amdgpu_cs_parser_init()
173 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); in amdgpu_cs_parser_init()
174 if (p->chunks[i].kdata == NULL) { in amdgpu_cs_parser_init()
180 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { in amdgpu_cs_parser_init()
185 switch (p->chunks[i].chunk_id) { in amdgpu_cs_parser_init()
[all …]
/linux-5.19.10/net/sctp/
chunk.c 43 INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
65 list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
81 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
280 list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
289 list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
auth.c 186 struct sctp_chunks_param *chunks, in sctp_auth_make_key_vector() argument
197 if (chunks) in sctp_auth_make_key_vector()
198 chunks_len = ntohs(chunks->param_hdr.length); in sctp_auth_make_key_vector()
209 if (chunks) { in sctp_auth_make_key_vector()
210 memcpy(new->data + offset, chunks, chunks_len); in sctp_auth_make_key_vector()
656 switch (param->chunks[i]) { in __sctp_auth_cid()
664 if (param->chunks[i] == chunk) in __sctp_auth_cid()
772 p->chunks[nchunks] = chunk_id; in sctp_auth_ep_add_chunkid()
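
__sctp_auth_cid() resolves "does this chunk type need auth" by scanning the flat id array in the negotiated parameter. The equivalent lookup, sketched without the SCTP-specific special cases:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Linear scan of a negotiated auth-chunks list for one chunk id,
 * mirroring the param->chunks[i] == chunk test above; the real
 * __sctp_auth_cid() also special-cases a few chunk types. */
static bool chunk_listed(const uint8_t *chunks, size_t nchunks, uint8_t chunk)
{
	for (size_t i = 0; i < nchunks; i++)
		if (chunks[i] == chunk)
			return true;
	return false;
}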
/linux-5.19.10/kernel/
audit_tree.c 17 struct list_head chunks; member
101 INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
435 list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
507 list_add(&p->list, &tree->chunks); in tag_chunk()
572 while (!list_empty(&victim->chunks)) { in prune_tree_chunks()
577 p = list_first_entry(&victim->chunks, struct audit_node, list); in prune_tree_chunks()
618 for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
623 list_add(p, &tree->chunks); in trim_marked()
705 list_for_each_entry(node, &tree->chunks, list) { in audit_trim_trees()
845 list_for_each_entry(node, &tree->chunks, list) in audit_add_tree_rule()
[all …]
/linux-5.19.10/drivers/infiniband/hw/efa/
efa_verbs.c 105 struct pbl_chunk *chunks; member
1268 chunk_list->chunks = kcalloc(chunk_list_size, in pbl_chunk_list_create()
1269 sizeof(*chunk_list->chunks), in pbl_chunk_list_create()
1271 if (!chunk_list->chunks) in pbl_chunk_list_create()
1280 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL); in pbl_chunk_list_create()
1281 if (!chunk_list->chunks[i].buf) in pbl_chunk_list_create()
1284 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE; in pbl_chunk_list_create()
1286 chunk_list->chunks[chunk_list_size - 1].length = in pbl_chunk_list_create()
1293 cur_chunk_buf = chunk_list->chunks[0].buf; in pbl_chunk_list_create()
1301 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf; in pbl_chunk_list_create()
[all …]
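
pbl_chunk_list_create() follows the usual allocate-then-unwind pattern: an array of chunk descriptors first, then one buffer per chunk, with everything already allocated freed on failure. A userspace sketch, with sizes and names as placeholders:

#include <stdlib.h>

struct pbl_chunk {
	void *buf;
	size_t length;
};

/* Allocate n chunks of chunk_size bytes each, unwinding on failure,
 * in the spirit of pbl_chunk_list_create() above. */
static struct pbl_chunk *alloc_chunk_list(size_t n, size_t chunk_size)
{
	struct pbl_chunk *c = calloc(n, sizeof(*c));

	if (!c)
		return NULL;
	for (size_t i = 0; i < n; i++) {
		c[i].buf = calloc(1, chunk_size);
		if (!c[i].buf) {
			while (i--)
				free(c[i].buf);
			free(c);
			return NULL;
		}
		c[i].length = chunk_size;
	}
	return c;
}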
/linux-5.19.10/mm/
zbud.c 271 int chunks, i, freechunks; in zbud_alloc() local
280 chunks = size_to_chunks(size); in zbud_alloc()
284 for_each_unbuddied_list(i, chunks) { in zbud_alloc()
309 zhdr->first_chunks = chunks; in zbud_alloc()
311 zhdr->last_chunks = chunks; in zbud_alloc()
z3fold.c 616 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) in get_free_buddy() argument
622 chunks <= zhdr->start_middle - ZHDR_CHUNKS) in get_free_buddy()
689 short chunks = size_to_chunks(sz); in compact_single_buddy() local
699 new_bud = get_free_buddy(new_zhdr, chunks); in compact_single_buddy()
703 new_zhdr->first_chunks = chunks; in compact_single_buddy()
707 new_zhdr->middle_chunks = chunks; in compact_single_buddy()
713 new_zhdr->last_chunks = chunks; in compact_single_buddy()
851 int chunks = size_to_chunks(size), i; in __z3fold_alloc() local
857 for_each_unbuddied_list(i, chunks) { in __z3fold_alloc()
913 l = &unbuddied[chunks]; in __z3fold_alloc()
[all …]
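
Both allocators bucket requests by size_to_chunks(size) before picking an unbuddied list; the rounding itself is just a round-up division. CHUNK_SIZE below is an arbitrary stand-in, not zbud's or z3fold's actual value:

#include <stddef.h>

#define CHUNK_SIZE 64	/* stand-in; the real allocators derive theirs */

/* Round an allocation size up to whole chunks, as the
 * size_to_chunks() calls above do. */
static size_t size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
}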
/linux-5.19.10/drivers/infiniband/ulp/rtrs/
README 28 session. A session is associated with a set of memory chunks reserved on the
36 chunks reserved for it on the server side. Their number, size and addresses
45 which of the memory chunks has been accessed and at which offset the message
80 the server (number of memory chunks which are going to be allocated for that
122 1. When processing a write request client selects one of the memory chunks
139 1. When processing a write request client selects one of the memory chunks
144 using the IMM field, the server invalidates the rkey associated with the memory chunks
162 1. When processing a read request client selects one of the memory chunks
181 1. When processing a read request client selects one of the memory chunks
186 the server invalidates the rkey associated with the memory chunks first; when it finishes,
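
The per-request step the README keeps returning to, "client selects one of the memory chunks", reduces to finding a free slot among the queue_depth chunks reserved at session setup. A toy sketch of that bookkeeping; the real client's data structure is not shown in this excerpt:

#include <stdbool.h>
#include <stddef.h>

/* Pick a free server-side chunk for the next request; -1 means all
 * queue_depth chunks are busy and the request must wait. A bitmap
 * scan stands in for whatever the real client uses. */
static int pick_free_chunk(bool *in_use, size_t queue_depth)
{
	for (size_t i = 0; i < queue_depth; i++) {
		if (!in_use[i]) {
			in_use[i] = true;
			return (int)i;
		}
	}
	return -1;
}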
rtrs-srv.c 603 int nr, nr_sgt, chunks; in map_cont_bufs() local
605 chunks = chunks_per_mr * mri; in map_cont_bufs()
608 srv->queue_depth - chunks); in map_cont_bufs()
615 sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
650 srv_path->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
1031 data = page_address(srv->chunks[buf_id]); in process_read()
1084 data = page_address(srv->chunks[buf_id]); in process_write()
1159 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
1261 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
1361 mempool_free(srv->chunks[i], chunk_pool); in free_srv()
[all …]
/linux-5.19.10/drivers/md/
md-bitmap.c 777 unsigned long chunks, int with_super, in md_bitmap_storage_alloc() argument
784 bytes = DIV_ROUND_UP(chunks, 8); in md_bitmap_storage_alloc()
1057 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; in md_bitmap_init_from_disk() local
1067 chunks = bitmap->counts.chunks; in md_bitmap_init_from_disk()
1074 for (i = 0; i < chunks ; i++) { in md_bitmap_init_from_disk()
1105 for (i = 0; i < chunks; i++) { in md_bitmap_init_from_disk()
1169 bit_cnt, chunks); in md_bitmap_init_from_disk()
1289 for (j = 0; j < counts->chunks; j++) { in md_bitmap_daemon_work()
1996 for (j = 0; j < counts->chunks; j++) { in md_bitmap_copy_from_slot()
2069 unsigned long chunks; in md_bitmap_resize() local
[all …]
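
The sizing rule visible at the top of this block is one bit per chunk: md_bitmap_storage_alloc() rounds the chunk count up to whole bytes (and, beyond this excerpt, to whole pages). In miniature:

#include <limits.h>

/* One dirty bit per chunk, rounded up to whole bytes, as in
 * bytes = DIV_ROUND_UP(chunks, 8) above. */
static unsigned long bitmap_bytes(unsigned long chunks)
{
	return (chunks + CHAR_BIT - 1) / CHAR_BIT;
}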
/linux-5.19.10/drivers/net/wireless/ti/wlcore/
boot.c 240 u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
245 chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
248 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
250 while (chunks--) { in wlcore_boot_upload_firmware()
261 chunks, addr, len); in wlcore_boot_upload_firmware()
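
The loop reads a big-endian chunk count and then walks per-chunk (address, length, payload) records. A sketch of that framing, with the record layout inferred from the excerpt rather than taken from the driver:

#include <stdint.h>

/* Read a 32-bit big-endian value from a byte stream. */
static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Walk a firmware image framed as [count][addr][len][payload]...;
 * the exact field layout is an assumption, not confirmed by the excerpt. */
static const uint8_t *walk_fw_chunks(const uint8_t *fw)
{
	uint32_t chunks = read_be32(fw);

	fw += 4;
	while (chunks--) {
		uint32_t addr = read_be32(fw);
		uint32_t len = read_be32(fw + 4);

		fw += 8;
		/* upload `len` payload bytes at `fw` to device address addr */
		(void)addr;
		fw += len;
	}
	return fw;
}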
/linux-5.19.10/drivers/gpu/drm/sprd/
sprd_dsi.c 463 u32 chunks = 0; in sprd_dsi_dpi_video() local
542 chunks = vm->hactive / video_size; in sprd_dsi_dpi_video()
545 if (total_bytes >= (bytes_per_chunk * chunks)) { in sprd_dsi_dpi_video()
547 bytes_per_chunk * chunks; in sprd_dsi_dpi_video()
553 if (bytes_left > (pkt_header * chunks)) { in sprd_dsi_dpi_video()
555 pkt_header * chunks) / chunks; in sprd_dsi_dpi_video()
563 chunks = 1; in sprd_dsi_dpi_video()
574 dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks); in sprd_dsi_dpi_video()
/linux-5.19.10/drivers/virt/vboxguest/
vboxguest_core.c 356 u32 i, chunks; in vbg_balloon_work() local
384 chunks = req->balloon_chunks; in vbg_balloon_work()
385 if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
387 __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
391 if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
393 for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
398 gdev->mem_balloon.chunks++; in vbg_balloon_work()
402 for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
407 gdev->mem_balloon.chunks--; in vbg_balloon_work()
1641 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
/linux-5.19.10/Documentation/admin-guide/device-mapper/
striped.rst 6 device across one or more underlying devices. Data is written in "chunks",
7 with consecutive chunks rotating among the underlying devices. This can
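
The rotation described here is plain modular arithmetic: resolve the sector to a chunk, pick the device by chunk index modulo the stripe count, and keep the in-chunk offset. A sketch assuming uniform devices; the real target handles more:

#include <stdint.h>

struct stripe_map {
	uint32_t stripes;	/* number of underlying devices */
	uint32_t chunk_sectors;	/* chunk size in 512-byte sectors */
};

/* Map a logical sector to (device, sector-on-device) with consecutive
 * chunks rotating across devices, as the dm-striped text describes. */
static void map_sector(const struct stripe_map *m, uint64_t sector,
		       uint32_t *dev, uint64_t *dev_sector)
{
	uint64_t chunk = sector / m->chunk_sectors;
	uint64_t offset = sector % m->chunk_sectors;

	*dev = (uint32_t)(chunk % m->stripes);
	*dev_sector = (chunk / m->stripes) * m->chunk_sectors + offset;
}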
/linux-5.19.10/drivers/dma/sh/
shdma-base.c 97 if (chunk->chunks == 1) { in shdma_tx_submit()
356 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
372 BUG_ON(desc->chunks != 1); in __ld_cleanup()
567 int chunks = 0; in shdma_prep_sg() local
572 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
612 new->chunks = 1; in shdma_prep_sg()
614 new->chunks = chunks--; in shdma_prep_sg()
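
The chunk count computed in shdma_prep_sg() is the sum of per-scatterlist-entry round-ups against the controller's maximum transfer length. The same arithmetic over a plain length array:

#include <stddef.h>

/* Total descriptor ("chunk") count for a transfer: each entry longer
 * than max_xfer is split, so round each length up, as the
 * DIV_ROUND_UP(sg_dma_len(sg), max_xfer_len) sum above does. */
static size_t count_chunks(const size_t *len, size_t nents, size_t max_xfer)
{
	size_t chunks = 0;

	for (size_t i = 0; i < nents; i++)
		chunks += (len[i] + max_xfer - 1) / max_xfer;
	return chunks;
}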
