/linux-6.1.9/drivers/s390/scsi/

D | zfcp_reqlist.h
     24  struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];  (member)
     50  INIT_LIST_HEAD(&rl->buckets[i]);  in zfcp_reqlist_alloc()
     66  if (!list_empty(&rl->buckets[i]))  in zfcp_reqlist_isempty()
     90  list_for_each_entry(req, &rl->buckets[i], list)  in _zfcp_reqlist_find()
    163  list_add_tail(&req->list, &rl->buckets[i]);  in zfcp_reqlist_add()
    180  list_splice_init(&rl->buckets[i], list);  in zfcp_reqlist_move()
    207  list_for_each_entry(req, &rl->buckets[i], list)  in zfcp_reqlist_apply_for_all()
|
/linux-6.1.9/tools/lib/bpf/

D | hashmap.h
     57  struct hashmap_entry **buckets;  (member)
     67  .buckets = NULL, \
    157  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    169  for (cur = map->buckets[bkt]; \
    180  for (cur = map->buckets \
    181  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
    188  for (cur = map->buckets \
    189  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
|
D | hashmap.c
     45  map->buckets = NULL;  in hashmap__init()
     71  free(map->buckets);  in hashmap__clear()
     72  map->buckets = NULL;  in hashmap__clear()
    124  free(map->buckets);  in hashmap_grow()
    125  map->buckets = new_buckets;  in hashmap_grow()
    137  if (!map->buckets)  in hashmap_find_entry()
    140  for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;  in hashmap_find_entry()
    200  hashmap_add_entry(&map->buckets[h], entry);  in hashmap__insert()
|
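This hashmap (tools/lib/bpf and tools/perf/util carry the same file) is a classic chained hash table: buckets is an array of singly linked entry chains that hashmap_grow() reallocates and the for (cur = map->buckets[bkt]; ...) loops walk. A minimal usage sketch, assuming a build inside the kernel tree with tools/lib/bpf/hashmap.h on the include path and the v6.1 prototypes, where keys and values are void pointers and str_hash() is a helper the header provides:

    #include <stdio.h>
    #include <string.h>
    #include "hashmap.h"    /* tools/lib/bpf/hashmap.h */

    /* hash and equality callbacks; str_hash() comes from hashmap.h */
    static size_t hash_str(const void *key, void *ctx)
    {
            return str_hash(key);
    }

    static bool equal_str(const void *a, const void *b, void *ctx)
    {
            return strcmp(a, b) == 0;
    }

    int main(void)
    {
            struct hashmap *map = hashmap__new(hash_str, equal_str, NULL);
            void *val;

            /* values are stored as void *, so small integers get cast */
            hashmap__add(map, "answer", (void *)(long)42);
            if (hashmap__find(map, "answer", &val))
                    printf("answer = %ld\n", (long)val);

            hashmap__free(map);
            return 0;
    }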
/linux-6.1.9/tools/perf/util/

D | hashmap.h
     57  struct hashmap_entry **buckets;  (member)
     67  .buckets = NULL, \
    157  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    169  for (cur = map->buckets[bkt]; \
    180  for (cur = map->buckets \
    181  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
    188  for (cur = map->buckets \
    189  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
|
D | hashmap.c
     45  map->buckets = NULL;  in hashmap__init()
     71  free(map->buckets);  in hashmap__clear()
     72  map->buckets = NULL;  in hashmap__clear()
    124  free(map->buckets);  in hashmap_grow()
    125  map->buckets = new_buckets;  in hashmap_grow()
    137  if (!map->buckets)  in hashmap_find_entry()
    140  for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;  in hashmap_find_entry()
    200  hashmap_add_entry(&map->buckets[h], entry);  in hashmap__insert()
|
D | ftrace.h
     44  int buckets[]);
     69  int buckets[] __maybe_unused)  in perf_ftrace__latency_read_bpf()
|
D | bpf_ftrace.c
    122  int buckets[])  in perf_ftrace__latency_read_bpf()  (argument)
    138  buckets[idx] = 0;  in perf_ftrace__latency_read_bpf()
    143  buckets[idx] += hist[i];  in perf_ftrace__latency_read_bpf()
|
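perf_ftrace__latency_read_bpf() above zeroes buckets[] and then accumulates into it with buckets[idx] += hist[i], folding the per-CPU copies that reading a per-CPU BPF map returns into one histogram. The shape of that aggregation as a self-contained sketch (array sizes and values invented):

    #include <stdio.h>

    #define NUM_BUCKET 4
    #define NR_CPUS    2

    int main(void)
    {
            /* one histogram copy per CPU, as a per-CPU BPF map lookup returns */
            int hist[NR_CPUS][NUM_BUCKET] = {
                    { 1, 5, 2, 0 },         /* cpu 0 */
                    { 0, 3, 4, 1 },         /* cpu 1 */
            };
            int buckets[NUM_BUCKET] = { 0 };
            int cpu, idx;

            for (idx = 0; idx < NUM_BUCKET; idx++)
                    for (cpu = 0; cpu < NR_CPUS; cpu++)
                            buckets[idx] += hist[cpu][idx];

            for (idx = 0; idx < NUM_BUCKET; idx++)
                    printf("bucket %d: %d\n", idx, buckets[idx]);
            return 0;
    }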
/linux-6.1.9/Documentation/networking/

D | nexthop-group-resilient.rst
     54  continuous. With a hash table, mapping between the hash table buckets and
     56  the buckets that held it are simply reassigned to other next hops::
     70  choose a subset of buckets that are currently not used for forwarding
     72  keeping the "busy" buckets intact. This way, established flows are ideally
     80  certain number of buckets, according to its weight and the number of
     81  buckets in the hash table. In accordance with the source code, we will call
     86  Next hops that have fewer buckets than their wants count, are called
     98  buckets:
    105  underweight next hops. If, after considering all buckets in this manner,
    109  There may not be enough "idle" buckets to satisfy the updated wants counts
    [all …]
|
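The documentation excerpt defines a next hop's "wants count" as the number of buckets it deserves, proportional to its weight and the table size. A rough illustration of that proportionality, a sketch only since the kernel's exact rounding and remainder distribution differ:

    #include <stdio.h>

    /* wants[i]: how many of num_buckets next hop i deserves given its weight */
    static void wants_counts(const unsigned int *weight, unsigned int n,
                             unsigned int num_buckets, unsigned int *wants)
    {
            unsigned long long total = 0;
            unsigned int i;

            for (i = 0; i < n; i++)
                    total += weight[i];
            for (i = 0; i < n; i++)
                    wants[i] = (unsigned long long)num_buckets * weight[i] / total;
    }

    int main(void)
    {
            unsigned int w[] = { 1, 3 }, wants[2];

            wants_counts(w, 2, 8, wants);
            printf("wants: %u %u\n", wants[0], wants[1]);    /* 2 and 6 */
            return 0;
    }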
/linux-6.1.9/block/

D | blk-stat.c
     86  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_timer_fn()
     93  for (bucket = 0; bucket < cb->buckets; bucket++) {  in blk_stat_timer_fn()
    105  unsigned int buckets, void *data)  in blk_stat_alloc_callback()  (argument)
    113  cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
    119  cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
    130  cb->buckets = buckets;  in blk_stat_alloc_callback()
    147  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_add_callback()
|
D | blk-stat.h
     45  unsigned int buckets;  (member)
     89  unsigned int buckets, void *data);
|
D | kyber-iosched.c
    136  atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];  (member)
    216  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in flush_latency_buckets()  (local)
    217  atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];  in flush_latency_buckets()
    221  buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);  in flush_latency_buckets()
    232  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in calculate_percentile()  (local)
    236  samples += buckets[bucket];  in calculate_percentile()
    255  if (buckets[bucket] >= percentile_samples)  in calculate_percentile()
    257  percentile_samples -= buckets[bucket];  in calculate_percentile()
    259  memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));  in calculate_percentile()
    637  atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);  in add_latency_sample()
|
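flush_latency_buckets() drains the per-CPU counters into a shared histogram with atomic_xchg(), and calculate_percentile() then totals the buckets and walks them, subtracting each bucket's population until it reaches the one holding the requested percentile. The same walk in standalone form, assuming the round-up division the snippets suggest:

    #include <stdio.h>

    #define NBUCKETS 8

    /* return the index of the bucket holding the p-th percentile sample */
    static int percentile_bucket(const unsigned int *buckets, unsigned int p)
    {
            unsigned long long samples = 0, remaining;
            int b;

            for (b = 0; b < NBUCKETS; b++)
                    samples += buckets[b];
            if (!samples)
                    return -1;

            /* samples at or below the percentile, rounded up */
            remaining = (samples * p + 99) / 100;
            for (b = 0; b < NBUCKETS; b++) {
                    if (buckets[b] >= remaining)
                            break;
                    remaining -= buckets[b];
            }
            return b;
    }

    int main(void)
    {
            unsigned int hist[NBUCKETS] = { 5, 10, 40, 30, 10, 3, 1, 1 };

            printf("p99 falls in bucket %d\n", percentile_bucket(hist, 99));
            return 0;
    }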
/linux-6.1.9/tools/perf/

D | builtin-ftrace.c
    683  static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,  in make_histogram()  (argument)
    740  buckets[i]++;  in make_histogram()
    751  static void display_histogram(int buckets[], bool use_nsec)  in display_histogram()  (argument)
    760  total += buckets[i];  in display_histogram()
    770  bar_len = buckets[0] * bar_total / total;  in display_histogram()
    772  0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, "");  in display_histogram()
    784  bar_len = buckets[i] * bar_total / total;  in display_histogram()
    786  start, stop, unit, buckets[i], bar_len, bar,  in display_histogram()
    790  bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;  in display_histogram()
    792  1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1],  in display_histogram()
    [all …]
|
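display_histogram() scales each bucket count to a fixed-width bar via buckets[i] * bar_total / total. A minimal standalone version of that proportional-bar technique, with the output format invented for the sketch:

    #include <stdio.h>

    #define NUM_BUCKET 8
    #define BAR_TOTAL  40

    int main(void)
    {
            unsigned int buckets[NUM_BUCKET] = { 2, 10, 25, 8, 3, 1, 0, 1 };
            const char *bar = "########################################"; /* 40 x '#' */
            unsigned long long total = 0;
            int i;

            for (i = 0; i < NUM_BUCKET; i++)
                    total += buckets[i];

            for (i = 0; i < NUM_BUCKET; i++) {
                    int bar_len = total ? buckets[i] * BAR_TOTAL / total : 0;

                    /* print bar_len '#' characters, then pad to full width */
                    printf("bucket %d: %6u |%.*s%*s|\n", i, buckets[i],
                           bar_len, bar, BAR_TOTAL - bar_len, "");
            }
            return 0;
    }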
/linux-6.1.9/tools/testing/selftests/drivers/net/netdevsim/

D | nexthop.sh
    213  $IP nexthop add id 10 group 1/2 type resilient buckets 4
    229  $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5
    259  $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null
    325  $IP nexthop add id 10 group 1/2 type resilient buckets 6
    353  $IP nexthop add id 10 group 1/2 type resilient buckets 6
    408  $IP nexthop add id 10 group 1/2 type resilient buckets 8 idle_timer 4
    434  type resilient buckets 8 idle_timer 6
    469  $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 4
    504  $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 8
    535  type resilient buckets 8 $timer 4
    [all …]
|
/linux-6.1.9/net/ceph/crush/

D | crush.c
    111  if (map->buckets) {  in crush_destroy()
    114  if (map->buckets[b] == NULL)  in crush_destroy()
    116  crush_destroy_bucket(map->buckets[b]);  in crush_destroy()
    118  kfree(map->buckets);  in crush_destroy()
|
D | mapper.c
    527  itemtype = map->buckets[-1-item]->type;  in crush_choose_firstn()
    540  in = map->buckets[-1-item];  in crush_choose_firstn()
    564  map->buckets[-1-item],  in crush_choose_firstn()
    741  itemtype = map->buckets[-1-item]->type;  in crush_choose_indep()
    758  in = map->buckets[-1-item];  in crush_choose_indep()
    778  map->buckets[-1-item],  in crush_choose_indep()
    865  if (!map->buckets[b])  in crush_init_workspace()
    869  switch (map->buckets[b]->alg) {  in crush_init_workspace()
    877  v += map->buckets[b]->size * sizeof(__u32);  in crush_init_workspace()
    948  map->buckets[-1-curstep->arg1])) {  in crush_do_rule()
    [all …]
|
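The recurring map->buckets[-1-item] reflects CRUSH's id convention: non-negative ids name devices, negative ids name buckets, and bucket id b lives at array index -1 - b. A tiny sketch of the mapping and its inverse:

    #include <assert.h>

    /* CRUSH item ids: >= 0 is a device, < 0 is a bucket; bucket id b is
     * stored at buckets[-1 - b], hence the map->buckets[-1-item] lookups. */
    static int bucket_index(int item)       /* item must be a bucket id (< 0) */
    {
            return -1 - item;
    }

    static int bucket_id(int index)         /* inverse mapping */
    {
            return -1 - index;
    }

    int main(void)
    {
            assert(bucket_index(-1) == 0);
            assert(bucket_index(-5) == 4);
            assert(bucket_id(bucket_index(-7)) == -7);
            return 0;
    }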
/linux-6.1.9/net/netfilter/ipvs/

D | ip_vs_sh.c
     70  struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];  (member)
    108  struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);  in ip_vs_sh_get()
    130  dest = rcu_dereference(s->buckets[ihash].dest);  in ip_vs_sh_get_fallback()
    145  dest = rcu_dereference(s->buckets[hash].dest);  in ip_vs_sh_get_fallback()
    172  b = &s->buckets[0];  in ip_vs_sh_reassign()
    216  b = &s->buckets[0];  in ip_vs_sh_flush()
|
D | ip_vs_dh.c
     64  struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE];  (member)
     90  return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);  in ip_vs_dh_get()
    106  b = &s->buckets[0];  in ip_vs_dh_reassign()
    140  b = &s->buckets[0];  in ip_vs_dh_flush()
|
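Both schedulers keep a fixed-size array of buckets, each caching an RCU-protected pointer to a destination, indexed by a multiplicative hash of the address (2654435761 is the golden-ratio constant ip_vs_dh_hashkey() multiplies by). A single-threaded sketch of that lookup scheme with the RCU protection omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define TAB_BITS 8
    #define TAB_SIZE (1 << TAB_BITS)
    #define TAB_MASK (TAB_SIZE - 1)

    struct dest { const char *name; };

    static struct dest *buckets[TAB_SIZE];

    /* multiplicative hash masked to the table size, mirroring the kernel */
    static unsigned int hashkey(uint32_t addr)
    {
            return (addr * 2654435761u) & TAB_MASK;
    }

    int main(void)
    {
            static struct dest d1 = { "rs1" }, d2 = { "rs2" };
            struct dest *d;

            buckets[hashkey(0x0a000001)] = &d1;     /* 10.0.0.1 */
            buckets[hashkey(0x0a000002)] = &d2;     /* 10.0.0.2 */

            d = buckets[hashkey(0x0a000001)];
            printf("%s\n", d ? d->name : "(none)");
            return 0;
    }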
/linux-6.1.9/fs/nfs/

D | pnfs_nfs.c
    102  p = kmalloc(struct_size(p, buckets, n), gfp_flags);  in pnfs_alloc_commit_array()
    109  for (b = &p->buckets[0]; n != 0; b++, n--) {  in pnfs_alloc_commit_array()
    259  struct pnfs_commit_bucket *buckets,  in pnfs_bucket_scan_array()  (argument)
    267  cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);  in pnfs_bucket_scan_array()
    288  cnt = pnfs_bucket_scan_array(cinfo, array->buckets,  in pnfs_generic_scan_commit_lists()
    304  struct pnfs_commit_bucket *buckets,  in pnfs_bucket_recover_commit_reqs()  (argument)
    314  for (i = 0, b = buckets; i < nbuckets; i++, b++) {  in pnfs_bucket_recover_commit_reqs()
    343  array->buckets,  in pnfs_generic_recover_commit_reqs()
    355  pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,  in pnfs_bucket_search_commit_reqs()  (argument)
    364  for (i = 0, b = buckets; i < nbuckets; i++, b++) {  in pnfs_bucket_search_commit_reqs()
    [all …]
|
D | nfs42xattr.c
     70  struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];  (member)
    111  INIT_HLIST_HEAD(&cache->buckets[i].hlist);  in nfs4_xattr_hash_init()
    112  spin_lock_init(&cache->buckets[i].lock);  in nfs4_xattr_hash_init()
    113  cache->buckets[i].cache = cache;  in nfs4_xattr_hash_init()
    114  cache->buckets[i].draining = false;  in nfs4_xattr_hash_init()
    276  if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))  in nfs4_xattr_free_cache_cb()
    278  cache->buckets[i].draining = false;  in nfs4_xattr_free_cache_cb()
    394  bucket = &cache->buckets[i];  in nfs4_xattr_discard_cache()
    506  return &cache->buckets[jhash(name, strlen(name), 0) &  in nfs4_xattr_hash_bucket()
    507  (ARRAY_SIZE(cache->buckets) - 1)];  in nfs4_xattr_hash_bucket()
    [all …]
|
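nfs4_xattr_hash_bucket() picks a bucket by ANDing a jhash of the attribute name with ARRAY_SIZE(cache->buckets) - 1, which distributes correctly only because the table size is a power of two. The idiom in isolation, with a stand-in string hash since jhash() is kernel-internal:

    #include <stdio.h>
    #include <stddef.h>

    #define HASH_SIZE 64    /* must stay a power of two for the mask trick */

    /* stand-in string hash (djb2); the kernel code uses jhash() here */
    static size_t name_hash(const char *s)
    {
            size_t h = 5381;

            while (*s)
                    h = h * 33 + (unsigned char)*s++;
            return h;
    }

    static size_t hash_bucket(const char *name)
    {
            return name_hash(name) & (HASH_SIZE - 1);
    }

    int main(void)
    {
            printf("user.comment -> bucket %zu\n", hash_bucket("user.comment"));
            return 0;
    }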
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/ipoib/

D | ipoib_vlan.c
     45  struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];  (member)
     71  static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,  in mlx5i_find_qpn_to_netdev_node()  (argument)
     74  struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];  in mlx5i_find_qpn_to_netdev_node()
     99  hlist_add_head(&new_node->hlist, &ht->buckets[key]);  in mlx5i_pkey_add_qpn()
    112  node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);  in mlx5i_pkey_del_qpn()
    131  node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);  in mlx5i_pkey_get_netdev()
|
/linux-6.1.9/drivers/md/

D | dm-region-hash.c
     70  struct list_head *buckets;  (member)
    206  rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));  in dm_region_hash_create()
    207  if (!rh->buckets) {  in dm_region_hash_create()
    214  INIT_LIST_HEAD(rh->buckets + i);  in dm_region_hash_create()
    228  vfree(rh->buckets);  in dm_region_hash_create()
    244  list_for_each_entry_safe(reg, nreg, rh->buckets + h,  in dm_region_hash_destroy()
    255  vfree(rh->buckets);  in dm_region_hash_destroy()
    274  struct list_head *bucket = rh->buckets + rh_hash(rh, region);  in __rh_lookup()
    285  list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));  in __rh_insert()
|
/linux-6.1.9/drivers/net/wireless/broadcom/brcm80211/brcmfmac/

D | pno.c
    298  struct brcmf_gscan_bucket_config **buckets,  in brcmf_pno_prep_fwconfig()  (argument)
    323  *buckets = NULL;  in brcmf_pno_prep_fwconfig()
    355  *buckets = fw_buckets;  in brcmf_pno_prep_fwconfig()
    396  struct brcmf_gscan_bucket_config *buckets;  in brcmf_pno_config_sched_scans()  (local)
    403  n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,  in brcmf_pno_config_sched_scans()
    408  gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);  in brcmf_pno_config_sched_scans()
    437  memcpy(&gscan_cfg->bucket[0], buckets,  in brcmf_pno_config_sched_scans()
    438  n_buckets * sizeof(*buckets));  in brcmf_pno_config_sched_scans()
    463  kfree(buckets);  in brcmf_pno_config_sched_scans()
|
/linux-6.1.9/kernel/bpf/

D | bpf_local_storage.c
     24  return &smap->buckets[hash_ptr(selem, smap->bucket_log)];  in select_bucket()
    549  b = &smap->buckets[i];  in bpf_local_storage_map_free()
    584  kvfree(smap->buckets);  in bpf_local_storage_map_free()
    623  smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,  in bpf_local_storage_map_alloc()
    625  if (!smap->buckets) {  in bpf_local_storage_map_alloc()
    631  INIT_HLIST_HEAD(&smap->buckets[i].list);  in bpf_local_storage_map_alloc()
    632  raw_spin_lock_init(&smap->buckets[i].lock);  in bpf_local_storage_map_alloc()
|
D | stackmap.c
     31  struct stack_map_bucket *buckets[];  (member)
    233  bucket = READ_ONCE(smap->buckets[id]);  in __bpf_get_stackid()
    277  old_bucket = xchg(&smap->buckets[id], new_bucket);  in __bpf_get_stackid()
    578  bucket = xchg(&smap->buckets[id], NULL);  in bpf_stackmap_copy()
    586  old_bucket = xchg(&smap->buckets[id], bucket);  in bpf_stackmap_copy()
    605  if (id >= smap->n_buckets || !smap->buckets[id])  in stack_map_get_next_key()
    611  while (id < smap->n_buckets && !smap->buckets[id])  in stack_map_get_next_key()
    637  old_bucket = xchg(&smap->buckets[id], NULL);  in stack_map_delete_elem()
|
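stackmap publishes and retires buckets with xchg(&smap->buckets[id], ...): the atomic exchange installs the new pointer and hands back sole ownership of the old one. A userspace sketch of the pattern using C11 atomics; note the kernel must additionally defer the free until concurrent readers are done:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bucket {
            unsigned long hash;
    };

    static _Atomic(struct bucket *) slot;

    /* install a new bucket; the exchange hands back the old one to free */
    static void publish(struct bucket *new_bucket)
    {
            struct bucket *old = atomic_exchange(&slot, new_bucket);

            free(old);      /* the kernel defers this until readers are done */
    }

    int main(void)
    {
            struct bucket *b = calloc(1, sizeof(*b));

            if (!b)
                    return 1;
            b->hash = 0xdeadbeef;
            publish(b);     /* replace (or first install) */
            publish(NULL);  /* delete: swap in NULL, reclaim the old bucket */
            return 0;
    }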
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/lag/

D | port_sel.c
     51  ft_attr.max_fte = ldev->ports * ldev->buckets;  in mlx5_lag_create_port_sel_table()
     78  for (j = 0; j < ldev->buckets; j++) {  in mlx5_lag_create_port_sel_table()
     81  idx = i * ldev->buckets + j;  in mlx5_lag_create_port_sel_table()
    346  for (j = 0; j < ldev->buckets; j++) {  in mlx5_lag_destroy_definer()
    347  idx = i * ldev->buckets + j;  in mlx5_lag_destroy_definer()
    575  for (j = 0; j < ldev->buckets; j++) {  in __mlx5_lag_modify_definers_destinations()
    576  idx = i * ldev->buckets + j;  in __mlx5_lag_modify_definers_destinations()
|
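port_sel.c sizes its flow table as ports * buckets entries and addresses them with idx = i * ldev->buckets + j, i.e. a (port, bucket) grid flattened row-major into one array. Illustrated:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ports = 2, buckets = 4;
            unsigned int i, j;

            /* (port i, bucket j) -> flat flow-table entry index */
            for (i = 0; i < ports; i++)
                    for (j = 0; j < buckets; j++)
                            printf("port %u, bucket %u -> fte %u\n",
                                   i, j, i * buckets + j);
            return 0;
    }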