/linux-6.1.9/net/mptcp/token.c

    111  struct token_bucket *bucket;    in mptcp_token_new_request() local
    122  bucket = token_bucket(token);    in mptcp_token_new_request()
    123  spin_lock_bh(&bucket->lock);    in mptcp_token_new_request()
    124  if (__token_bucket_busy(bucket, token)) {    in mptcp_token_new_request()
    125  spin_unlock_bh(&bucket->lock);    in mptcp_token_new_request()
    129  hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);    in mptcp_token_new_request()
    130  bucket->chain_len++;    in mptcp_token_new_request()
    131  spin_unlock_bh(&bucket->lock);    in mptcp_token_new_request()
    156  struct token_bucket *bucket;    in mptcp_token_new_connect() local
    162  bucket = token_bucket(subflow->token);    in mptcp_token_new_connect()
    [all …]

/linux-6.1.9/net/ceph/crush/mapper.c

     74  static int bucket_perm_choose(const struct crush_bucket *bucket,    in bucket_perm_choose() argument
     78  unsigned int pr = r % bucket->size;    in bucket_perm_choose()
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);    in bucket_perm_choose()
     88  s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %    in bucket_perm_choose()
     89  bucket->size;    in bucket_perm_choose()
     95  for (i = 0; i < bucket->size; i++)    in bucket_perm_choose()
    100  for (i = 1; i < bucket->size; i++)    in bucket_perm_choose()
    112  if (p < bucket->size - 1) {    in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %    in bucket_perm_choose()
    114  (bucket->size - p);    in bucket_perm_choose()
    [all …]

/linux-6.1.9/block/blk-stat.c

     56  int bucket, cpu;    in blk_stat_add() local
     69  bucket = cb->bucket_fn(rq);    in blk_stat_add()
     70  if (bucket < 0)    in blk_stat_add()
     73  stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];    in blk_stat_add()
     83  unsigned int bucket;    in blk_stat_timer_fn() local
     86  for (bucket = 0; bucket < cb->buckets; bucket++)    in blk_stat_timer_fn()
     87  blk_rq_stat_init(&cb->stat[bucket]);    in blk_stat_timer_fn()
     93  for (bucket = 0; bucket < cb->buckets; bucket++) {    in blk_stat_timer_fn()
     94  blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);    in blk_stat_timer_fn()
     95  blk_rq_stat_init(&cpu_stat[bucket]);    in blk_stat_timer_fn()
    [all …]

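The blk-stat.c hits show the usual split between a hot path that bumps a per-CPU bucket (picked by cb->bucket_fn()) and a timer callback that folds the per-CPU copies into one result and resets them. A minimal userspace analog of that fold step, with plain arrays standing in for per_cpu_ptr() and all names invented for illustration:

    #include <stdio.h>

    #define NR_CPUS    4
    #define NR_BUCKETS 8

    /* One counter array per CPU, written by that CPU only on the hot path. */
    static unsigned long cpu_stat[NR_CPUS][NR_BUCKETS];

    /* Periodic flush: sum every CPU's buckets into a global view and reset. */
    static void flush_stats(unsigned long total[NR_BUCKETS])
    {
        for (int bucket = 0; bucket < NR_BUCKETS; bucket++)
            total[bucket] = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            for (int bucket = 0; bucket < NR_BUCKETS; bucket++) {
                total[bucket] += cpu_stat[cpu][bucket];
                cpu_stat[cpu][bucket] = 0;   /* reset, like the per-CPU re-init above */
            }
        }
    }

    int main(void)
    {
        unsigned long total[NR_BUCKETS];

        cpu_stat[0][2] = 5;
        cpu_stat[3][2] = 7;
        flush_stats(total);
        printf("bucket 2 = %lu\n", total[2]);   /* prints 12 */
        return 0;
    }
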
/linux-6.1.9/block/kyber-iosched.c

    218  unsigned int bucket;    in flush_latency_buckets() local
    220  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)    in flush_latency_buckets()
    221  buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);    in flush_latency_buckets()
    233  unsigned int bucket, samples = 0, percentile_samples;    in calculate_percentile() local
    235  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)    in calculate_percentile()
    236  samples += buckets[bucket];    in calculate_percentile()
    254  for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {    in calculate_percentile()
    255  if (buckets[bucket] >= percentile_samples)    in calculate_percentile()
    257  percentile_samples -= buckets[bucket];    in calculate_percentile()
    263  bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);    in calculate_percentile()
    [all …]

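calculate_percentile() above walks the latency histogram until the running count covers the requested percentile. A rough standalone sketch of that walk; the bucket count, rounding, and return convention are simplified and are not Kyber's exact math:

    #include <stdio.h>

    #define LATENCY_BUCKETS 8

    /* Return the index of the first bucket at which the cumulative sample
     * count reaches the requested percentile, or -1 if there are no samples. */
    static int calculate_percentile(const unsigned int buckets[LATENCY_BUCKETS],
                                    unsigned int percentile)
    {
        unsigned int samples = 0, percentile_samples;
        int bucket;

        for (bucket = 0; bucket < LATENCY_BUCKETS; bucket++)
            samples += buckets[bucket];
        if (!samples)
            return -1;

        /* Samples that must fall at or below the answer, rounded up. */
        percentile_samples = (samples * percentile + 99) / 100;

        for (bucket = 0; bucket < LATENCY_BUCKETS - 1; bucket++) {
            if (buckets[bucket] >= percentile_samples)
                break;
            percentile_samples -= buckets[bucket];
        }
        return bucket;
    }

    int main(void)
    {
        unsigned int hist[LATENCY_BUCKETS] = { 10, 50, 30, 5, 3, 1, 1, 0 };

        printf("p90 falls in bucket %d\n", calculate_percentile(hist, 90));
        return 0;
    }
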
/linux-6.1.9/net/sched/sch_hhf.c

    329  static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)    in dequeue_head() argument
    331  struct sk_buff *skb = bucket->head;    in dequeue_head()
    333  bucket->head = skb->next;    in dequeue_head()
    339  static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)    in bucket_add() argument
    341  if (bucket->head == NULL)    in bucket_add()
    342  bucket->head = skb;    in bucket_add()
    344  bucket->tail->next = skb;    in bucket_add()
    345  bucket->tail = skb;    in bucket_add()
    352  struct wdrr_bucket *bucket;    in hhf_drop() local
    355  bucket = &q->buckets[WDRR_BUCKET_FOR_HH];    in hhf_drop()
    [all …]

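dequeue_head() and bucket_add() above are a plain head/tail singly linked FIFO per bucket. A small generic sketch of the same two operations; the struct names here are invented for the example, and the NULL check on dequeue is an addition (the kernel version relies on the caller):

    #include <stdio.h>
    #include <stddef.h>

    struct pkt {
        struct pkt *next;
        int id;
    };

    struct bucket_fifo {
        struct pkt *head;
        struct pkt *tail;
    };

    /* Pop the oldest packet, or NULL if the bucket is empty. */
    static struct pkt *dequeue_head(struct bucket_fifo *b)
    {
        struct pkt *p = b->head;

        if (p) {
            b->head = p->next;
            p->next = NULL;
        }
        return p;
    }

    /* Append a packet; the tail pointer makes this O(1). */
    static void bucket_add(struct bucket_fifo *b, struct pkt *p)
    {
        p->next = NULL;
        if (b->head == NULL)
            b->head = p;
        else
            b->tail->next = p;
        b->tail = p;
    }

    int main(void)
    {
        struct bucket_fifo b = { NULL, NULL };
        struct pkt a = { .id = 1 }, c = { .id = 2 };
        struct pkt *first, *second;

        bucket_add(&b, &a);
        bucket_add(&b, &c);
        first = dequeue_head(&b);
        second = dequeue_head(&b);
        printf("%d %d\n", first->id, second->id);   /* 1 2 */
        return 0;
    }
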
/linux-6.1.9/drivers/infiniband/sw/rdmavt/trace_qp.h

     18  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     19  TP_ARGS(qp, bucket),
     23  __field(u32, bucket)
     28  __entry->bucket = bucket;
     34  __entry->bucket
     39  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     40  TP_ARGS(qp, bucket));
     43  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     44  TP_ARGS(qp, bucket));

/linux-6.1.9/drivers/interconnect/qcom/bcm-voter.c

     64  size_t i, bucket;    in bcm_aggregate() local
     69  for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {    in bcm_aggregate()
     72  temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,    in bcm_aggregate()
     74  agg_avg[bucket] = max(agg_avg[bucket], temp);    in bcm_aggregate()
     76  temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,    in bcm_aggregate()
     78  agg_peak[bucket] = max(agg_peak[bucket], temp);    in bcm_aggregate()
     81  temp = agg_avg[bucket] * bcm->vote_scale;    in bcm_aggregate()
     82  bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);    in bcm_aggregate()
     84  temp = agg_peak[bucket] * bcm->vote_scale;    in bcm_aggregate()
     85  bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);    in bcm_aggregate()
    [all …]

/linux-6.1.9/fs/nfs/nfs42xattr.c

     87  struct nfs4_xattr_bucket *bucket;    member
    238  entry->bucket = NULL;    in nfs4_xattr_alloc_entry()
    388  struct nfs4_xattr_bucket *bucket;    in nfs4_xattr_discard_cache() local
    394  bucket = &cache->buckets[i];    in nfs4_xattr_discard_cache()
    396  spin_lock(&bucket->lock);    in nfs4_xattr_discard_cache()
    397  bucket->draining = true;    in nfs4_xattr_discard_cache()
    398  hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {    in nfs4_xattr_discard_cache()
    403  spin_unlock(&bucket->lock);    in nfs4_xattr_discard_cache()
    511  nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)    in nfs4_xattr_get_entry() argument
    517  hlist_for_each_entry(entry, &bucket->hlist, hnode) {    in nfs4_xattr_get_entry()
    [all …]

/linux-6.1.9/fs/nfs/pnfs_nfs.c

     63  pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)    in pnfs_free_bucket_lseg() argument
     65  if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {    in pnfs_free_bucket_lseg()
     66  struct pnfs_layout_segment *freeme = bucket->lseg;    in pnfs_free_bucket_lseg()
     67  bucket->lseg = NULL;    in pnfs_free_bucket_lseg()
     81  struct pnfs_commit_bucket *bucket = NULL;    in pnfs_generic_clear_request_commit() local
     87  bucket = list_first_entry(&req->wb_list,    in pnfs_generic_clear_request_commit()
     91  if (bucket)    in pnfs_generic_clear_request_commit()
     92  pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));    in pnfs_generic_clear_request_commit()
    241  pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,    in pnfs_bucket_scan_ds_commit_list() argument
    245  struct list_head *src = &bucket->written;    in pnfs_bucket_scan_ds_commit_list()
    [all …]

/linux-6.1.9/net/9p/error.c

    179  int bucket;    in p9_error_init() local
    182  for (bucket = 0; bucket < ERRHASHSZ; bucket++)    in p9_error_init()
    183  INIT_HLIST_HEAD(&hash_errmap[bucket]);    in p9_error_init()
    188  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;    in p9_error_init()
    190  hlist_add_head(&c->list, &hash_errmap[bucket]);    in p9_error_init()
    208  int bucket;    in p9_errstr2errno() local
    212  bucket = jhash(errstr, len, 0) % ERRHASHSZ;    in p9_errstr2errno()
    213  hlist_for_each_entry(c, &hash_errmap[bucket], list) {    in p9_errstr2errno()

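The error.c hits are a textbook string-keyed hash table: hash the error string, reduce the hash modulo the number of buckets, and walk that bucket's list. A self-contained sketch with a trivial hash standing in for jhash(); ERRHASHSZ's value and the struct layout here are illustrative, not the 9p definitions:

    #include <stdio.h>
    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap {
        const char *name;       /* error string */
        int errno_val;          /* errno it translates to */
        struct errmap *next;
    };

    static struct errmap *hash_errmap[ERRHASHSZ];

    /* Any reasonable string hash works; the kernel code uses jhash(). */
    static unsigned int hash_str(const char *s, size_t len)
    {
        unsigned int h = 5381;

        while (len--)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static void errmap_add(struct errmap *c)
    {
        unsigned int bucket = hash_str(c->name, strlen(c->name)) % ERRHASHSZ;

        c->next = hash_errmap[bucket];
        hash_errmap[bucket] = c;
    }

    static int errstr2errno(const char *errstr)
    {
        unsigned int bucket = hash_str(errstr, strlen(errstr)) % ERRHASHSZ;

        for (struct errmap *c = hash_errmap[bucket]; c; c = c->next)
            if (!strcmp(c->name, errstr))
                return c->errno_val;
        return 0;   /* not found */
    }

    int main(void)
    {
        static struct errmap eperm = { "Operation not permitted", 1, NULL };

        errmap_add(&eperm);
        printf("%d\n", errstr2errno("Operation not permitted"));   /* 1 */
        return 0;
    }
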
/linux-6.1.9/net/vmw_vsock/diag.c

     52  unsigned int bucket;    in vsock_diag_dump() local
     63  bucket = cb->args[1];    in vsock_diag_dump()
     72  while (bucket < ARRAY_SIZE(vsock_bind_table)) {    in vsock_diag_dump()
     73  struct list_head *head = &vsock_bind_table[bucket];    in vsock_diag_dump()
     94  bucket++;    in vsock_diag_dump()
     98  bucket = 0;    in vsock_diag_dump()
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {    in vsock_diag_dump()
    103  struct list_head *head = &vsock_connected_table[bucket];    in vsock_diag_dump()
    128  bucket++;    in vsock_diag_dump()
    135  cb->args[1] = bucket;    in vsock_diag_dump()

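vsock_diag_dump() resumes across netlink callbacks by stashing the bucket index (and the position within it) in cb->args, so the next invocation continues where the previous one stopped. A stripped-down sketch of that resumable walk over an array of buckets; the two-table structure and the socket filtering are left out, and all sizes are invented:

    #include <stdio.h>

    #define NR_BUCKETS  4
    #define BATCH_LIMIT 3   /* pretend the dump buffer fits three entries */

    static int table[NR_BUCKETS][2] = {    /* two entries per bucket */
        { 10, 11 }, { 20, 21 }, { 30, 31 }, { 40, 41 },
    };

    /* Emit up to BATCH_LIMIT entries starting at *bucket/*idx, updating the
     * cursor so a later call can continue. Returns the number emitted
     * (0 means the walk is complete). */
    static int dump_some(unsigned int *bucket, unsigned int *idx)
    {
        int emitted = 0;

        while (*bucket < NR_BUCKETS) {
            while (*idx < 2) {
                if (emitted == BATCH_LIMIT)
                    return emitted;
                printf("entry %d\n", table[*bucket][*idx]);
                (*idx)++;
                emitted++;
            }
            (*bucket)++;
            *idx = 0;
        }
        return emitted;
    }

    int main(void)
    {
        unsigned int bucket = 0, idx = 0;   /* would live in cb->args[] */

        while (dump_some(&bucket, &idx) > 0)
            printf("-- callback boundary --\n");
        return 0;
    }
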
/linux-6.1.9/fs/dlm/debug_fs.c

    370  unsigned bucket;    member
    429  unsigned bucket, entry;    in table_seq_start() local
    432  bucket = n >> 32;    in table_seq_start()
    435  if (bucket >= ls->ls_rsbtbl_size)    in table_seq_start()
    452  tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;    in table_seq_start()
    454  spin_lock(&ls->ls_rsbtbl[bucket].lock);    in table_seq_start()
    461  ri->bucket = bucket;    in table_seq_start()
    462  spin_unlock(&ls->ls_rsbtbl[bucket].lock);    in table_seq_start()
    467  spin_unlock(&ls->ls_rsbtbl[bucket].lock);    in table_seq_start()
    477  bucket++;    in table_seq_start()
    [all …]

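table_seq_start() packs both a hash bucket number and a position within it into the single 64-bit seq_file offset: the bucket lives in the top 32 bits (the "bucket = n >> 32" hit above); that the entry index occupies the low half is inferred here rather than shown. A tiny sketch of that encode/decode, with nothing else of the DLM code reproduced:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t make_pos(uint32_t bucket, uint32_t entry)
    {
        return ((uint64_t)bucket << 32) | entry;
    }

    static void split_pos(uint64_t pos, uint32_t *bucket, uint32_t *entry)
    {
        *bucket = pos >> 32;        /* same shift as "bucket = n >> 32" above */
        *entry  = (uint32_t)pos;
    }

    int main(void)
    {
        uint32_t bucket, entry;

        split_pos(make_pos(7, 1234), &bucket, &entry);
        printf("bucket=%u entry=%u\n", bucket, entry);   /* 7 1234 */
        return 0;
    }
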
/linux-6.1.9/net/rxrpc/proc.c

    250  unsigned int bucket, n;    in rxrpc_peer_seq_start() local
    260  bucket = *_pos >> shift;    in rxrpc_peer_seq_start()
    262  if (bucket >= HASH_SIZE(rxnet->peer_hash)) {    in rxrpc_peer_seq_start()
    267  if (bucket == 0)    in rxrpc_peer_seq_start()
    273  p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);    in rxrpc_peer_seq_start()
    276  bucket++;    in rxrpc_peer_seq_start()
    278  *_pos = (bucket << shift) | n;    in rxrpc_peer_seq_start()
    285  unsigned int bucket, n;    in rxrpc_peer_seq_next() local
    292  bucket = *_pos >> shift;    in rxrpc_peer_seq_next()
    294  p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);    in rxrpc_peer_seq_next()
    [all …]

/linux-6.1.9/kernel/dma/debug.c

    263  static void put_hash_bucket(struct hash_bucket *bucket,    in put_hash_bucket() argument
    265  __releases(&bucket->lock)    in put_hash_bucket()
    267  spin_unlock_irqrestore(&bucket->lock, flags);    in put_hash_bucket()
    292  static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,    in __hash_bucket_find() argument
    299  list_for_each_entry(entry, &bucket->list, list) {    in __hash_bucket_find()
    342  static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,    in bucket_find_exact() argument
    345  return __hash_bucket_find(bucket, ref, exact_match);    in bucket_find_exact()
    348  static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,    in bucket_find_contain() argument
    357  entry = __hash_bucket_find(*bucket, ref, containing_match);    in bucket_find_contain()
    365  put_hash_bucket(*bucket, *flags);    in bucket_find_contain()
    [all …]

/linux-6.1.9/drivers/cpuidle/governors/menu.c

    114  unsigned int bucket;    member
    122  int bucket = 0;    in which_bucket() local
    131  bucket = BUCKETS/2;    in which_bucket()
    134  return bucket;    in which_bucket()
    136  return bucket + 1;    in which_bucket()
    138  return bucket + 2;    in which_bucket()
    140  return bucket + 3;    in which_bucket()
    142  return bucket + 4;    in which_bucket()
    143  return bucket + 5;    in which_bucket()
    291  data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);    in menu_select()
    [all …]

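which_bucket() maps the predicted sleep length onto a small set of histogram buckets by comparing it against a ladder of thresholds, and biases the result when I/O is outstanding. The cutoffs and bucket count below are illustrative; the governor's actual constants may differ:

    #include <stdio.h>
    #include <stdint.h>

    #define BUCKETS 12

    /* Map an expected idle duration (nanoseconds) to a bucket index.
     * The nr_iowaiters bias and the exact cutoffs are simplified guesses. */
    static int which_bucket(uint64_t duration_ns, unsigned int nr_iowaiters)
    {
        int bucket = 0;

        if (nr_iowaiters)
            bucket = BUCKETS / 2;                /* I/O pending: second half */

        if (duration_ns < 10ULL * 1000)          /* < 10 us  */
            return bucket;
        if (duration_ns < 100ULL * 1000)         /* < 100 us */
            return bucket + 1;
        if (duration_ns < 1000ULL * 1000)        /* < 1 ms   */
            return bucket + 2;
        if (duration_ns < 10ULL * 1000 * 1000)   /* < 10 ms  */
            return bucket + 3;
        if (duration_ns < 100ULL * 1000 * 1000)  /* < 100 ms */
            return bucket + 4;
        return bucket + 5;
    }

    int main(void)
    {
        printf("%d\n", which_bucket(50ULL * 1000, 0));   /* 50 us, no I/O -> 1 */
        printf("%d\n", which_bucket(50ULL * 1000, 3));   /* 50 us, I/O    -> 7 */
        return 0;
    }
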
/linux-6.1.9/Documentation/userspace-api/media/v4l/pixfmt-meta-vsp1-hgt.rst

     28  The Saturation position **n** (0 - 31) of the bucket in the matrix is
     33  The Hue position **m** (0 - 5) of the bucket in the matrix depends on
    101  - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
    103  - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
    107  - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
    109  - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
    113  - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
    117  - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
    121  - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
    125  - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
    [all …]

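The layout excerpted above stores the 2-D histogram row by row: all 32 saturation buckets for hue area m=0, then m=1, and so on. Under that reading, the linear index of bucket (m, n) in the array of 32-bit words is m * 32 + n. The helper below makes that indexing explicit; it is an interpretation of the table fragments, not text from the format description:

    #include <stdio.h>
    #include <stdint.h>

    #define HGT_NUM_HUE_AREAS    6   /* m: 0..5  */
    #define HGT_NUM_SAT_BUCKETS 32   /* n: 0..31 */

    /* Index of histogram bucket (m, n) within the flat array of 32-bit words,
     * assuming the hue-major layout the table above suggests. */
    static unsigned int hgt_bucket_index(unsigned int m, unsigned int n)
    {
        return m * HGT_NUM_SAT_BUCKETS + n;
    }

    int main(void)
    {
        uint32_t hist[HGT_NUM_HUE_AREAS * HGT_NUM_SAT_BUCKETS] = { 0 };

        hist[hgt_bucket_index(2, 0)] = 123;   /* bucket (m=2, n=0) */
        printf("index of (m=2, n=0) is %u\n", hgt_bucket_index(2, 0));   /* 64 */
        return 0;
    }
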
/linux-6.1.9/fs/ocfs2/xattr.c

    125  struct ocfs2_xattr_bucket *bucket;    member
    279  struct ocfs2_xattr_bucket *bucket,
    301  struct ocfs2_xattr_bucket *bucket,
    322  struct ocfs2_xattr_bucket *bucket;    in ocfs2_xattr_bucket_new() local
    327  bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);    in ocfs2_xattr_bucket_new()
    328  if (bucket) {    in ocfs2_xattr_bucket_new()
    329  bucket->bu_inode = inode;    in ocfs2_xattr_bucket_new()
    330  bucket->bu_blocks = blks;    in ocfs2_xattr_bucket_new()
    333  return bucket;    in ocfs2_xattr_bucket_new()
    336  static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)    in ocfs2_xattr_bucket_relse() argument
    [all …]

/linux-6.1.9/drivers/md/bcache/alloc.c

     76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)    in bch_inc_gen()
     89  struct bucket *b;    in bch_rescale_priorities()
    125  static inline bool can_inc_bucket_gen(struct bucket *b)    in can_inc_bucket_gen()
    130  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)    in bch_can_invalidate_bucket()
    140  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)    in __bch_invalidate_one_bucket()
    153  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)    in bch_invalidate_one_bucket()
    181  struct bucket *b;    in invalidate_buckets_lru()
    218  struct bucket *b;    in invalidate_buckets_fifo()
    241  struct bucket *b;    in invalidate_buckets_random()
    302  static int bch_allocator_push(struct cache *ca, long bucket)    in bch_allocator_push() argument
    [all …]

/linux-6.1.9/drivers/md/dm-clone-target.c

    564  #define bucket_lock_irqsave(bucket, flags) \    argument
    565  spin_lock_irqsave(&(bucket)->lock, flags)
    567  #define bucket_unlock_irqrestore(bucket, flags) \    argument
    568  spin_unlock_irqrestore(&(bucket)->lock, flags)
    570  #define bucket_lock_irq(bucket) \    argument
    571  spin_lock_irq(&(bucket)->lock)
    573  #define bucket_unlock_irq(bucket) \    argument
    574  spin_unlock_irq(&(bucket)->lock)
    579  struct hash_table_bucket *bucket;    in hash_table_init() local
    588  bucket = clone->ht + i;    in hash_table_init()
    [all …]

/linux-6.1.9/Documentation/networking/nexthop-group-resilient.rst

     49  to choose a hash table bucket, then reads the next hop that this bucket
     83  cause bucket allocation change, the wants counts for individual next hops
     91  Each bucket maintains a last-used timer. Every time a packet is forwarded
     92  through a bucket, this timer is updated to current jiffies value. One
     94  amount of time that a bucket must not be hit by traffic in order for it to
    104  upkeep changes the next hop that the bucket references to one of the
    135  - Single-bucket notifications of the type
    143  Some single-bucket notifications are forced, as indicated by the "force"
    145  hop associated with the bucket was removed, and the bucket really must be
    150  bucket should be migrated, but the HW discovers that the bucket has in fact
    [all …]

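The excerpt describes the forwarding step of a resilient nexthop group: hash the packet, use the hash to pick a bucket in a fixed-size table, forward to the next hop that bucket currently points at, and refresh the bucket's last-used timestamp so that only buckets idle for long enough are later migrated to a different next hop. A compact sketch of that lookup path; the flow hashing and the nexthop representation are stand-ins, not the kernel's data structures:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    #define NUM_BUCKETS 8

    struct res_bucket {
        int nh_id;          /* next hop this bucket currently maps to */
        time_t last_used;   /* refreshed on every forwarded packet    */
    };

    static struct res_bucket table[NUM_BUCKETS] = {
        {1, 0}, {1, 0}, {2, 0}, {2, 0}, {2, 0}, {3, 0}, {3, 0}, {3, 0},
    };

    /* Forward one packet: hash -> bucket -> next hop, and touch the timer. */
    static int forward(uint32_t flow_hash)
    {
        struct res_bucket *b = &table[flow_hash % NUM_BUCKETS];

        b->last_used = time(NULL);
        return b->nh_id;
    }

    /* A bucket is eligible for migration to another next hop only after it
     * has seen no traffic for idle_timer seconds. */
    static int bucket_idle(const struct res_bucket *b, time_t now, time_t idle_timer)
    {
        return now - b->last_used >= idle_timer;
    }

    int main(void)
    {
        printf("flow 0x1234 -> nh %d\n", forward(0x1234));
        printf("bucket 0 idle? %d\n", bucket_idle(&table[0], time(NULL) + 120, 60));
        return 0;
    }
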
/linux-6.1.9/include/trace/events/bcache.h

     68  __field(size_t, bucket )
     72  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
     75  TP_printk("bucket %zu", __entry->bucket)
    267  __field(size_t, bucket )
    273  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    279  __entry->bucket, __entry->block, __entry->keys)
    370  __field(size_t, bucket )
    375  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    379  TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
    429  TP_PROTO(struct cache *ca, size_t bucket),
    [all …]

/linux-6.1.9/kernel/bpf/stackmap.c

    217  struct stack_map_bucket *bucket, *new_bucket, *old_bucket;    in __bpf_get_stackid() local
    233  bucket = READ_ONCE(smap->buckets[id]);    in __bpf_get_stackid()
    235  hash_matches = bucket && bucket->hash == hash;    in __bpf_get_stackid()
    251  if (hash_matches && bucket->nr == trace_nr &&    in __bpf_get_stackid()
    252  memcmp(bucket->data, new_bucket->data, trace_len) == 0) {    in __bpf_get_stackid()
    256  if (bucket && !(flags & BPF_F_REUSE_STACKID)) {    in __bpf_get_stackid()
    261  if (hash_matches && bucket->nr == trace_nr &&    in __bpf_get_stackid()
    262  memcmp(bucket->data, ips, trace_len) == 0)    in __bpf_get_stackid()
    264  if (bucket && !(flags & BPF_F_REUSE_STACKID))    in __bpf_get_stackid()
    572  struct stack_map_bucket *bucket, *old_bucket;    in bpf_stackmap_copy() local
    [all …]

/linux-6.1.9/lib/stackdepot.c

    297  static inline struct stack_record *find_stack(struct stack_record *bucket,    in find_stack() argument
    303  for (found = bucket; found; found = found->next) {    in find_stack()
    424  struct stack_record *found = NULL, **bucket;    in __stack_depot_save() local
    445  bucket = &stack_table[hash & stack_hash_mask];    in __stack_depot_save()
    452  found = find_stack(smp_load_acquire(bucket), entries,    in __stack_depot_save()
    481  found = find_stack(*bucket, entries, nr_entries, hash);    in __stack_depot_save()
    486  new->next = *bucket;    in __stack_depot_save()
    491  smp_store_release(bucket, new);    in __stack_depot_save()

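__stack_depot_save() does an optimistic lookup on the bucket head with smp_load_acquire(), and only on a miss takes the lock, re-checks, links the new record at the head, and publishes it with smp_store_release() so that lock-free readers always see a fully initialised node. A userspace rendering of that insert-if-absent pattern using C11 atomics and a mutex; the record contents are simplified and error handling is omitted:

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct stack_record {
        struct stack_record *next;
        unsigned long hash;
        int value;
    };

    static _Atomic(struct stack_record *) bucket;   /* one hash bucket head */
    static pthread_mutex_t depot_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct stack_record *find_stack(struct stack_record *head,
                                           unsigned long hash)
    {
        for (struct stack_record *r = head; r; r = r->next)
            if (r->hash == hash)
                return r;
        return NULL;
    }

    static struct stack_record *depot_save(unsigned long hash, int value)
    {
        /* Fast path: lock-free lookup; acquire pairs with the release below. */
        struct stack_record *found =
            find_stack(atomic_load_explicit(&bucket, memory_order_acquire), hash);
        if (found)
            return found;

        pthread_mutex_lock(&depot_lock);
        /* Re-check under the lock in case another thread inserted it. */
        found = find_stack(atomic_load_explicit(&bucket, memory_order_relaxed), hash);
        if (!found) {
            struct stack_record *new = malloc(sizeof(*new));

            new->hash = hash;
            new->value = value;
            new->next = atomic_load_explicit(&bucket, memory_order_relaxed);
            /* Publish only after the record is fully initialised. */
            atomic_store_explicit(&bucket, new, memory_order_release);
            found = new;
        }
        pthread_mutex_unlock(&depot_lock);
        return found;
    }

    int main(void)
    {
        printf("%d\n", depot_save(42, 7)->value);   /* inserts, prints 7 */
        printf("%d\n", depot_save(42, 9)->value);   /* found, still 7    */
        return 0;
    }
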
/linux-6.1.9/arch/sparc/kernel/irq_64.c

    207  struct ino_bucket bucket;    member
    258  struct ino_bucket *bucket;    in cookie_exists() local
    269  bucket = (struct ino_bucket *) __va(cookie);    in cookie_exists()
    270  irq = bucket->__irq;    in cookie_exists()
    279  struct ino_bucket *bucket;    in sysino_exists() local
    282  bucket = &ivector_table[sysino];    in sysino_exists()
    283  irq = bucket_get_irq(__pa(bucket));    in sysino_exists()
    616  struct ino_bucket *bucket;    in build_irq() local
    623  bucket = &ivector_table[ino];    in build_irq()
    624  irq = bucket_get_irq(__pa(bucket));    in build_irq()
    [all …]

/linux-6.1.9/net/atm/proc.c

     69  int bucket;    member
     78  static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)    in __vcc_walk() argument
     83  for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {    in __vcc_walk()
     84  struct hlist_head *head = &vcc_hash[*bucket];    in __vcc_walk()
     98  if (!sk && ++*bucket < VCC_HTABLE_SIZE) {    in __vcc_walk()
     99  sk = sk_head(&vcc_hash[*bucket]);    in __vcc_walk()
    113  return __vcc_walk(&state->sk, family, &state->bucket, l) ?    in vcc_walk()
