/linux-6.1.9/kernel/bpf/ |
D | queue_stack_maps.c |
    54 attr->value_size == 0 ||  in queue_stack_map_alloc_check()
    59 if (attr->value_size > KMALLOC_MAX_SIZE)  in queue_stack_map_alloc_check()
    75 queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
    108 memset(value, 0, qs->map.value_size);  in __queue_map_get()
    113 ptr = &qs->elements[qs->tail * qs->map.value_size];  in __queue_map_get()
    114 memcpy(value, ptr, qs->map.value_size);  in __queue_map_get()
    138 memset(value, 0, qs->map.value_size);  in __stack_map_get()
    147 ptr = &qs->elements[index * qs->map.value_size];  in __stack_map_get()
    148 memcpy(value, ptr, qs->map.value_size);  in __stack_map_get()
    212 dst = &qs->elements[qs->head * qs->map.value_size];  in queue_stack_map_push_elem()
    [all …]
|
D | bloom_filter.c |
    31 u32 value_size, u32 index)  in hash() argument
    39 h = jhash(value, value_size, bloom->hash_seed + index);  in hash()
    51 h = hash(bloom, value, map->value_size, i);  in bloom_map_peek_elem()
    69 h = hash(bloom, value, map->value_size, i);  in bloom_map_push_elem()
    100 if (attr->key_size != 0 || attr->value_size == 0 ||  in bloom_map_alloc()
    156 if ((attr->value_size & (sizeof(u32) - 1)) == 0)  in bloom_map_alloc()
    158 attr->value_size / sizeof(u32);  in bloom_map_alloc()
|
D | map_iter.c |
    105 u32 key_acc_size, value_acc_size, key_size, value_size;  in bpf_iter_attach_map() local
    130 value_size = map->value_size;  in bpf_iter_attach_map()
    132 value_size = round_up(map->value_size, 8) * num_possible_cpus();  in bpf_iter_attach_map()
    134 if (key_acc_size > key_size || value_acc_size > value_size) {  in bpf_iter_attach_map()
|
D | map_in_map.c |
    50 inner_map_meta->value_size = inner_map->value_size;  in bpf_map_meta_alloc()
    86 meta0->value_size == meta1->value_size &&  in bpf_map_meta_equal()
|
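The queue_stack_maps.c hits above show the storage scheme these maps share: one flat allocation of (max_entries + 1) * value_size bytes, with each element found at index * value_size inside it. Below is a minimal user-space model of that layout, an illustration only and not the kernel code; the demo_* names are invented for this sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_queue {
	unsigned int value_size;	/* like bpf_map.value_size */
	unsigned int size;		/* max_entries + 1, as in queue_stack_map_alloc() */
	unsigned int head, tail;
	char elements[];		/* flexible array, like the kernel's element area */
};

static struct demo_queue *demo_alloc(unsigned int max_entries, unsigned int value_size)
{
	unsigned int size = max_entries + 1;
	struct demo_queue *qs = calloc(1, sizeof(*qs) + (size_t)size * value_size);

	if (!qs)
		return NULL;
	qs->value_size = value_size;
	qs->size = size;
	return qs;
}

static int demo_push(struct demo_queue *qs, const void *value)
{
	if ((qs->head + 1) % qs->size == qs->tail)
		return -1;		/* full */
	/* mirrors: dst = &qs->elements[qs->head * qs->map.value_size] */
	memcpy(&qs->elements[qs->head * qs->value_size], value, qs->value_size);
	qs->head = (qs->head + 1) % qs->size;
	return 0;
}

static int demo_pop(struct demo_queue *qs, void *value)
{
	if (qs->head == qs->tail)
		return -1;		/* empty */
	/* mirrors: ptr = &qs->elements[qs->tail * qs->map.value_size] */
	memcpy(value, &qs->elements[qs->tail * qs->value_size], qs->value_size);
	qs->tail = (qs->tail + 1) % qs->size;
	return 0;
}

int main(void)
{
	struct demo_queue *qs = demo_alloc(4, sizeof(long));
	long in = 42, out = 0;

	if (!qs)
		return 1;
	demo_push(qs, &in);
	if (!demo_pop(qs, &out))
		printf("popped %ld\n", out);
	free(qs);
	return 0;
}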
/linux-6.1.9/tools/testing/selftests/bpf/prog_tests/ |
D | btf.c |
    69 __u32 value_size;  member
    137 .value_size = 180,
    192 .value_size = 68,
    217 .value_size = 16,
    258 .value_size = 48,
    303 .value_size = 48,
    325 .value_size = 4,
    347 .value_size = 4,
    369 .value_size = 4,
    394 .value_size = 4,
    [all …]
|
/linux-6.1.9/tools/bpf/bpftool/ |
D | map.c |
    66 return malloc(round_up(info->value_size, 8) *  in alloc_value()
    69 return malloc(info->value_size);  in alloc_value()
    102 step = round_up(map_info->value_size, 8);  in do_dump_btf()
    142 print_hex_data_json(value, info->value_size);  in print_entry_json()
    157 step = round_up(info->value_size, 8);  in print_entry_json()
    171 info->value_size);  in print_entry_json()
    251 break_names = info->key_size > 16 || info->value_size > 16;  in print_entry_plain()
    252 single_line = info->key_size + info->value_size <= 24 &&  in print_entry_plain()
    262 if (info->value_size) {  in print_entry_plain()
    264 fprint_hex(stdout, value, info->value_size, " ");  in print_entry_plain()
    [all …]
|
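alloc_value() and the print_entry_*() hits above encode bpftool's sizing rule for per-CPU maps: each entry carries one value slot per possible CPU, and each slot is rounded up to 8 bytes. A small sketch of the same rule using libbpf follows; the helper name alloc_map_value is invented here, and the list of per-CPU map types is not exhaustive.

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */

static void *alloc_map_value(const struct bpf_map_info *info)
{
	/* Per-CPU maps: one 8-byte-aligned value_size slot per possible CPU. */
	if (info->type == BPF_MAP_TYPE_PERCPU_HASH ||
	    info->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    info->type == BPF_MAP_TYPE_LRU_PERCPU_HASH)
		return malloc((size_t)((info->value_size + 7) & ~7u) *
			      libbpf_num_possible_cpus());

	/* Everything else: value_size bytes per entry. */
	return malloc(info->value_size);
}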
/linux-6.1.9/tools/testing/selftests/bpf/benchs/ |
D | bench_bloom_filter_map.c |
    41 __u8 value_size;  member
    45 .value_size = 8,
    93 args.value_size = ret;  in parse_arg()
    136 val_size = args.value_size;  in map_prepare_thread()
    243 if (args.value_size < 8) {  in check_args()
    244 __u64 nr_unique_entries = 1ULL << (args.value_size * 8);  in check_args()
    279 bpf_map__set_value_size(skel->maps.array_map, args.value_size);  in setup_skeleton()
    281 bpf_map__set_value_size(skel->maps.bloom_map, args.value_size);  in setup_skeleton()
    283 bpf_map__set_value_size(skel->maps.hashmap, args.value_size);  in setup_skeleton()
    286 bpf_map__set_key_size(skel->maps.hashmap, args.value_size);  in setup_skeleton()
    [all …]
|
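setup_skeleton() above resizes the benchmark's maps with bpf_map__set_value_size()/bpf_map__set_key_size() before the skeleton is loaded. Here is a hedged sketch of the same call pattern against a plain bpf_object; the map names "bloom_map" and "hashmap" are only assumptions carried over from the listing.

#include <bpf/libbpf.h>

static int resize_maps(struct bpf_object *obj, __u32 value_size)
{
	struct bpf_map *bloom = bpf_object__find_map_by_name(obj, "bloom_map");
	struct bpf_map *hash = bpf_object__find_map_by_name(obj, "hashmap");

	if (!bloom || !hash)
		return -1;

	/* Only legal before bpf_object__load(); the sizes are frozen afterwards. */
	if (bpf_map__set_value_size(bloom, value_size) ||
	    bpf_map__set_value_size(hash, value_size) ||
	    bpf_map__set_key_size(hash, value_size))	/* the benchmark keys the hash by the value */
		return -1;

	return 0;
}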
/linux-6.1.9/tools/testing/selftests/bpf/map_tests/ |
D | array_map_batch_ops.c |
    76 int err, step, value_size;  in __test_map_lookup_and_update_batch() local
    88 value_size = sizeof(__s64);  in __test_map_lookup_and_update_batch()
    90 value_size *= nr_cpus;  in __test_map_lookup_and_update_batch()
    93 values = calloc(max_entries, value_size);  in __test_map_lookup_and_update_batch()
    104 memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_update_batch()
    115 values + total * value_size,  in __test_map_lookup_and_update_batch()
|
D | map_in_map_batch_ops.c |
    128 __u32 value_size = sizeof(__u32);  in fetch_and_validate() local
    131 fetched_keys = calloc(max_entries, value_size);  in fetch_and_validate()
    132 fetched_values = calloc(max_entries, value_size);  in fetch_and_validate()
    192 __u32 value_size = sizeof(__u32);  in _map_in_map_batch_ops() local
    197 outer_map_keys = calloc(max_entries, value_size);  in _map_in_map_batch_ops()
    198 inner_map_fds = calloc(max_entries, value_size);  in _map_in_map_batch_ops()
|
D | htab_map_batch_ops.c |
    84 int err, step, value_size;  in __test_map_lookup_and_delete_batch() local
    97 value_size = is_pcpu ? sizeof(value) : sizeof(int);  in __test_map_lookup_and_delete_batch()
    125 memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    144 memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    156 total * value_size,  in __test_map_lookup_and_delete_batch()
    210 memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    219 total * value_size,  in __test_map_lookup_and_delete_batch()
|
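The three batch-ops tests above size their values buffer the same way: value_size per entry, multiplied by the CPU count for per-CPU maps, with each bpf_map_lookup_batch() round writing to values + total * value_size. A condensed sketch of that loop is below; map_fd, max_entries and is_percpu are assumed inputs, and both libbpf error conventions (negative return vs. errno) are checked since they differ across versions.

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int dump_map(int map_fd, __u32 max_entries, bool is_percpu)
{
	size_t value_size = sizeof(__s64);
	__u32 batch = 0, count, total = 0;
	__u32 *keys;
	char *values;
	int err = 0;

	if (is_percpu)
		value_size *= libbpf_num_possible_cpus();

	keys = calloc(max_entries, sizeof(*keys));
	values = calloc(max_entries, value_size);
	if (!keys || !values) {
		err = -ENOMEM;
		goto out;
	}

	while (total < max_entries) {
		count = max_entries - total;
		err = bpf_map_lookup_batch(map_fd, total ? &batch : NULL, &batch,
					   keys + total,
					   values + (size_t)total * value_size,
					   &count, NULL);
		if (err && err != -ENOENT && errno != ENOENT)
			break;			/* real failure */
		total += count;
		if (err) {			/* ENOENT: the whole map has been read */
			err = 0;
			break;
		}
	}
out:
	free(keys);
	free(values);
	return err;
}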
/linux-6.1.9/tools/lib/bpf/ |
D | libbpf_probes.c |
    194 int key_size, value_size, max_entries;  in probe_map_create() local
    199 value_size = sizeof(__u32);  in probe_map_create()
    204 value_size = sizeof(__u64);  in probe_map_create()
    208 value_size = sizeof(__u64);  in probe_map_create()
    214 value_size = sizeof(__u64);  in probe_map_create()
    226 value_size = 8;  in probe_map_create()
    236 value_size = 0;  in probe_map_create()
    288 fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);  in probe_map_create()
|
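probe_map_create() ends in the bpf_map_create() call shown above, with key_size and value_size chosen per map type. The same call in isolation, for an ordinary hash map, looks like this; the sizes and the map name "demo_hash" are arbitrary choices for the example.

#include <bpf/bpf.h>

int create_demo_hash(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);

	/* 4-byte keys, 8-byte values, 128 entries; returns a map fd, or a
	 * negative error under libbpf 1.0 semantics. */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "demo_hash",
			      sizeof(__u32), sizeof(__u64), 128, &opts);
}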
/linux-6.1.9/tools/perf/util/bpf_skel/ |
D | func_latency.bpf.c |
    13 __uint(value_size, sizeof(__u64));
    20 __uint(value_size, sizeof(__u8));
    27 __uint(value_size, sizeof(__u8));
    34 __uint(value_size, sizeof(__u64));
|
D | bperf_leader.bpf.c |
    10 __uint(value_size, sizeof(int));
    17 __uint(value_size, sizeof(struct bpf_perf_event_value));
    24 __uint(value_size, sizeof(struct bpf_perf_event_value));
|
D | bperf_follower.bpf.c |
    11 __uint(value_size, sizeof(struct bpf_perf_event_value));
    18 __uint(value_size, sizeof(struct bpf_perf_event_value));
    25 __uint(value_size, sizeof(__u32));
|
D | lock_contention.bpf.c |
    37 __uint(value_size, MAX_STACKS * sizeof(__u64));
    53 __uint(value_size, sizeof(struct contention_data));
    60 __uint(value_size, sizeof(__u8));
    67 __uint(value_size, sizeof(__u8));
|
D | bpf_prog_profiler.bpf.c |
    11 __uint(value_size, sizeof(int));
    18 __uint(value_size, sizeof(struct bpf_perf_event_value));
    26 __uint(value_size, sizeof(struct bpf_perf_event_value));
|
D | off_cpu.bpf.c |
    38 __uint(value_size, MAX_STACKS * sizeof(__u64));
    52 __uint(value_size, sizeof(__u64));
    59 __uint(value_size, sizeof(__u8));
    66 __uint(value_size, sizeof(__u8));
    73 __uint(value_size, sizeof(__u8));
|
D | bperf_cgroup.bpf.c |
    19 __uint(value_size, sizeof(int));
    27 __uint(value_size, sizeof(__u32));
    35 __uint(value_size, sizeof(struct bpf_perf_event_value));
    43 __uint(value_size, sizeof(struct bpf_perf_event_value));
|
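All of the perf skeletons above (and bpftool's profiler.bpf.c further down) declare their maps with libbpf's BTF map syntax, where __uint(value_size, ...) fixes the per-element size at compile time. A representative fragment in the same style follows; MAX_STACKS and the map names are placeholders, and building it needs clang with the BPF target plus libbpf's bpf_helpers.h.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACKS 8	/* placeholder; the perf skeletons use their own limits */

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(int));		/* one perf event fd per CPU */
} events SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_STACKS * sizeof(__u64));	/* one stored call stack */
	__uint(max_entries, 16384);
} stacks SEC(".maps");

char LICENSE[] SEC("license") = "GPL";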
/linux-6.1.9/drivers/net/ethernet/mellanox/mlxsw/ |
D | spectrum_dpipe.c |
    137 match_value->value_size = sizeof(u32);  in mlxsw_sp_erif_entry_prepare()
    138 match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_erif_entry_prepare()
    143 action_value->value_size = sizeof(u32);  in mlxsw_sp_erif_entry_prepare()
    144 action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);  in mlxsw_sp_erif_entry_prepare()
    421 match_value->value_size = sizeof(u32);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    422 match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    432 match_value->value_size = sizeof(u32);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    435 match_value->value_size = sizeof(struct in6_addr);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    442 match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    447 action_value->value_size = sizeof(u64);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    [all …]
|
/linux-6.1.9/tools/bpf/bpftool/skeleton/ |
D | profiler.bpf.c |
    11 __uint(value_size, sizeof(int));
    18 __uint(value_size, sizeof(struct bpf_perf_event_value));
    25 __uint(value_size, sizeof(struct bpf_perf_event_value));
    32 __uint(value_size, sizeof(u64));
|
/linux-6.1.9/drivers/md/persistent-data/ |
D | dm-btree-internal.h |
    35 __le32 value_size;  member
    120 uint32_t value_size = le32_to_cpu(n->header.value_size);  in value_ptr() local
    121 return value_base(n) + (value_size * index);  in value_ptr()
|
D | dm-btree-remove.c |
    62 uint32_t value_size = le32_to_cpu(n->header.value_size);  in node_shift() local
    73 (nr_entries - shift) * value_size);  in node_shift()
    81 nr_entries * value_size);  in node_shift()
    88 uint32_t value_size = le32_to_cpu(left->header.value_size);  in node_copy() local
    89 if (value_size != le32_to_cpu(right->header.value_size)) {  in node_copy()
    107 shift * value_size);  in node_copy()
    119 shift * value_size);  in node_copy()
    131 uint32_t value_size = le32_to_cpu(n->header.value_size);  in delete_at() local
    141 nr_to_copy * value_size);  in delete_at()
|
D | dm-btree.c |
    83 static int insert_at(size_t value_size, struct btree_node *node, unsigned index,  in insert_at() argument
    102 array_insert(value_base(node), value_size, nr_entries, index, value);  in insert_at()
    114 static uint32_t calc_max_entries(size_t value_size, size_t block_size)  in calc_max_entries() argument
    117 size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */  in calc_max_entries()
    146 n->header.value_size = cpu_to_le32(info->value_type.size);  in dm_btree_empty()
    342 uint64_t *result_key, void *v, size_t value_size)  in btree_lookup_raw() argument
    366 memcpy(v, value_ptr(ro_node(s), i), value_size);  in btree_lookup_raw()
    509 size_t value_size = le32_to_cpu(dest->header.value_size);  in copy_entries() local
    511 memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);  in copy_entries()
    522 size_t value_size = le32_to_cpu(dest->header.value_size);  in move_entries() local
    [all …]
|
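In the persistent-data code above, value_size lives in the on-disk btree node header, and value_ptr() resolves an entry as value_base(n) + value_size * index, i.e. the values are packed directly after the key array. A user-space illustration of that layout is sketched below; the endianness conversions and the real btree_node fields are omitted, so this is not the kernel structure.

#include <stdint.h>
#include <stddef.h>

struct demo_node_header {
	uint32_t nr_entries;
	uint32_t max_entries;
	uint32_t value_size;	/* bytes per stored value, as in the node header */
};

struct demo_node {
	struct demo_node_header header;
	uint64_t keys[];	/* max_entries keys, then max_entries packed values */
};

/* value_base(): the value area starts right after the key array. */
static void *demo_value_base(struct demo_node *n)
{
	return &n->keys[n->header.max_entries];
}

/* value_ptr(): index into the packed value area by value_size. */
static void *demo_value_ptr(struct demo_node *n, uint32_t index)
{
	return (char *)demo_value_base(n) + (size_t)n->header.value_size * index;
}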
/linux-6.1.9/scripts/dtc/ |
D | fdtput.c |
    60 int value_size = 0; /* size of holding area */  in encode_value() local
    83 if (upto + len > value_size) {  in encode_value()
    84 value_size = (upto + len) + 500;  in encode_value()
    85 value = realloc(value, value_size);  in encode_value()
    88 "%d bytes\n", value_size);  in encode_value()
|
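encode_value() above keeps a realloc'd holding area whose size is tracked in value_size and regrown with 500 bytes of headroom whenever the next chunk would not fit. The same pattern as a stand-alone helper; append_bytes is a name invented for this sketch.

#include <stdlib.h>
#include <string.h>

static int append_bytes(char **value, int *value_size, int *upto,
			const void *src, int len)
{
	if (*upto + len > *value_size) {
		char *bigger;

		*value_size = (*upto + len) + 500;	/* same headroom as fdtput */
		bigger = realloc(*value, *value_size);
		if (!bigger)
			return -1;			/* old buffer is still owned by the caller */
		*value = bigger;
	}
	memcpy(*value + *upto, src, len);
	*upto += len;
	return 0;
}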
/linux-6.1.9/tools/perf/tests/ |
D | bpf-script-test-relocation.c |
    23 unsigned int value_size;  member
    31 .value_size = sizeof(int),
|