Searched refs:bucket (Results 1 – 25 of 183) sorted by relevance


/linux-6.6.21/net/mptcp/
token.c
111 struct token_bucket *bucket; in mptcp_token_new_request() local
122 bucket = token_bucket(token); in mptcp_token_new_request()
123 spin_lock_bh(&bucket->lock); in mptcp_token_new_request()
124 if (__token_bucket_busy(bucket, token)) { in mptcp_token_new_request()
125 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
129 hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); in mptcp_token_new_request()
130 bucket->chain_len++; in mptcp_token_new_request()
131 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
157 struct token_bucket *bucket; in mptcp_token_new_connect() local
163 bucket = token_bucket(subflow->token); in mptcp_token_new_connect()
[all …]
/linux-6.6.21/net/ceph/crush/
mapper.c
74 static int bucket_perm_choose(const struct crush_bucket *bucket, in bucket_perm_choose() argument
78 unsigned int pr = r % bucket->size; in bucket_perm_choose()
83 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose()
88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % in bucket_perm_choose()
89 bucket->size; in bucket_perm_choose()
95 for (i = 0; i < bucket->size; i++) in bucket_perm_choose()
100 for (i = 1; i < bucket->size; i++) in bucket_perm_choose()
112 if (p < bucket->size - 1) { in bucket_perm_choose()
113 i = crush_hash32_3(bucket->hash, x, bucket->id, p) % in bucket_perm_choose()
114 (bucket->size - p); in bucket_perm_choose()
[all …]
/linux-6.6.21/drivers/interconnect/qcom/
bcm-voter.c
65 int bucket, i; in bcm_aggregate_mask() local
67 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate_mask()
68 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
69 bcm->vote_y[bucket] = 0; in bcm_aggregate_mask()
75 if (node->sum_avg[bucket] || node->max_peak[bucket]) { in bcm_aggregate_mask()
76 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
77 bcm->vote_y[bucket] = bcm->enable_mask; in bcm_aggregate_mask()
94 size_t i, bucket; in bcm_aggregate() local
99 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate()
102 temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, in bcm_aggregate()
[all …]
/linux-6.6.21/block/
blk-stat.c
55 int bucket, cpu; in blk_stat_add() local
69 bucket = cb->bucket_fn(rq); in blk_stat_add()
70 if (bucket < 0) in blk_stat_add()
73 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add()
83 unsigned int bucket; in blk_stat_timer_fn() local
86 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
87 blk_rq_stat_init(&cb->stat[bucket]); in blk_stat_timer_fn()
93 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
94 blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]); in blk_stat_timer_fn()
95 blk_rq_stat_init(&cpu_stat[bucket]); in blk_stat_timer_fn()
[all …]
kyber-iosched.c
216 unsigned int bucket; in flush_latency_buckets() local
218 for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++) in flush_latency_buckets()
219 buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0); in flush_latency_buckets()
231 unsigned int bucket, samples = 0, percentile_samples; in calculate_percentile() local
233 for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++) in calculate_percentile()
234 samples += buckets[bucket]; in calculate_percentile()
252 for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) { in calculate_percentile()
253 if (buckets[bucket] >= percentile_samples) in calculate_percentile()
255 percentile_samples -= buckets[bucket]; in calculate_percentile()
261 bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples); in calculate_percentile()
[all …]
/linux-6.6.21/net/sched/
sch_hhf.c
329 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument
331 struct sk_buff *skb = bucket->head; in dequeue_head()
333 bucket->head = skb->next; in dequeue_head()
339 static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) in bucket_add() argument
341 if (bucket->head == NULL) in bucket_add()
342 bucket->head = skb; in bucket_add()
344 bucket->tail->next = skb; in bucket_add()
345 bucket->tail = skb; in bucket_add()
352 struct wdrr_bucket *bucket; in hhf_drop() local
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
[all …]
/linux-6.6.21/drivers/infiniband/sw/rdmavt/
trace_qp.h
18 TP_PROTO(struct rvt_qp *qp, u32 bucket),
19 TP_ARGS(qp, bucket),
23 __field(u32, bucket)
28 __entry->bucket = bucket;
34 __entry->bucket
39 TP_PROTO(struct rvt_qp *qp, u32 bucket),
40 TP_ARGS(qp, bucket));
43 TP_PROTO(struct rvt_qp *qp, u32 bucket),
44 TP_ARGS(qp, bucket));
/linux-6.6.21/fs/nfs/
nfs42xattr.c
87 struct nfs4_xattr_bucket *bucket; member
238 entry->bucket = NULL; in nfs4_xattr_alloc_entry()
388 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local
394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
396 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache()
397 bucket->draining = true; in nfs4_xattr_discard_cache()
398 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache()
403 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache()
511 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) in nfs4_xattr_get_entry() argument
517 hlist_for_each_entry(entry, &bucket->hlist, hnode) { in nfs4_xattr_get_entry()
[all …]
pnfs_nfs.c
63 pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) in pnfs_free_bucket_lseg() argument
65 if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { in pnfs_free_bucket_lseg()
66 struct pnfs_layout_segment *freeme = bucket->lseg; in pnfs_free_bucket_lseg()
67 bucket->lseg = NULL; in pnfs_free_bucket_lseg()
81 struct pnfs_commit_bucket *bucket = NULL; in pnfs_generic_clear_request_commit() local
87 bucket = list_first_entry(&req->wb_list, in pnfs_generic_clear_request_commit()
91 if (bucket) in pnfs_generic_clear_request_commit()
92 pnfs_put_lseg(pnfs_free_bucket_lseg(bucket)); in pnfs_generic_clear_request_commit()
241 pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, in pnfs_bucket_scan_ds_commit_list() argument
245 struct list_head *src = &bucket->written; in pnfs_bucket_scan_ds_commit_list()
[all …]
/linux-6.6.21/net/9p/
error.c
179 int bucket; in p9_error_init() local
182 for (bucket = 0; bucket < ERRHASHSZ; bucket++) in p9_error_init()
183 INIT_HLIST_HEAD(&hash_errmap[bucket]); in p9_error_init()
188 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; in p9_error_init()
190 hlist_add_head(&c->list, &hash_errmap[bucket]); in p9_error_init()
208 int bucket; in p9_errstr2errno() local
212 bucket = jhash(errstr, len, 0) % ERRHASHSZ; in p9_errstr2errno()
213 hlist_for_each_entry(c, &hash_errmap[bucket], list) { in p9_errstr2errno()
/linux-6.6.21/net/vmw_vsock/
diag.c
52 unsigned int bucket; in vsock_diag_dump() local
63 bucket = cb->args[1]; in vsock_diag_dump()
72 while (bucket < ARRAY_SIZE(vsock_bind_table)) { in vsock_diag_dump()
73 struct list_head *head = &vsock_bind_table[bucket]; in vsock_diag_dump()
94 bucket++; in vsock_diag_dump()
98 bucket = 0; in vsock_diag_dump()
102 while (bucket < ARRAY_SIZE(vsock_connected_table)) { in vsock_diag_dump()
103 struct list_head *head = &vsock_connected_table[bucket]; in vsock_diag_dump()
128 bucket++; in vsock_diag_dump()
135 cb->args[1] = bucket; in vsock_diag_dump()
/linux-6.6.21/drivers/cpuidle/governors/
menu.c
116 unsigned int bucket; member
124 int bucket = 0; in which_bucket() local
133 bucket = BUCKETS/2; in which_bucket()
136 return bucket; in which_bucket()
138 return bucket + 1; in which_bucket()
140 return bucket + 2; in which_bucket()
142 return bucket + 3; in which_bucket()
144 return bucket + 4; in which_bucket()
145 return bucket + 5; in which_bucket()
293 data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); in menu_select()
[all …]
/linux-6.6.21/kernel/dma/
debug.c
264 static void put_hash_bucket(struct hash_bucket *bucket, in put_hash_bucket() argument
266 __releases(&bucket->lock) in put_hash_bucket()
268 spin_unlock_irqrestore(&bucket->lock, flags); in put_hash_bucket()
293 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument
300 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
343 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument
346 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact()
349 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, in bucket_find_contain() argument
358 entry = __hash_bucket_find(*bucket, ref, containing_match); in bucket_find_contain()
366 put_hash_bucket(*bucket, *flags); in bucket_find_contain()
[all …]
/linux-6.6.21/fs/dlm/
debug_fs.c
417 unsigned bucket; member
484 unsigned bucket, entry; in table_seq_start() local
487 bucket = n >> 32; in table_seq_start()
490 if (bucket >= ls->ls_rsbtbl_size) in table_seq_start()
509 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; in table_seq_start()
511 spin_lock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
518 ri->bucket = bucket; in table_seq_start()
519 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
524 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
534 bucket++; in table_seq_start()
[all …]
/linux-6.6.21/Documentation/userspace-api/media/v4l/
metafmt-vsp1-hgt.rst
28 The Saturation position **n** (0 - 31) of the bucket in the matrix is
33 The Hue position **m** (0 - 5) of the bucket in the matrix depends on
101 - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
103 - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
107 - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
109 - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
113 - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
117 - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
121 - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
125 - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
[all …]
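
The HGT snippet above describes a 6 x 32 matrix of 32-bit histogram buckets, laid out hue area first (m = 0-5) and saturation position second (n = 0-31), in the order the table lists them. As a rough illustration only (the helper name and base offset are invented here; this is not code from the kernel or the V4L2 headers), the byte offset of bucket (m, n) in such a layout could be computed like this:

#include <stddef.h>
#include <stdint.h>

#define HGT_HUE_AREAS		6	/* hue positions m = 0..5 */
#define HGT_SAT_POSITIONS	32	/* saturation positions n = 0..31 */

/* Hypothetical helper: byte offset of the 32-bit bucket (m, n), assuming
 * the hue-major ordering shown in the table above and a caller-supplied
 * base offset for the first bucket. */
static inline size_t hgt_bucket_offset(size_t base, unsigned int m, unsigned int n)
{
	return base + (size_t)(m * HGT_SAT_POSITIONS + n) * sizeof(uint32_t);
}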
/linux-6.6.21/fs/ocfs2/
xattr.c
121 struct ocfs2_xattr_bucket *bucket; member
275 struct ocfs2_xattr_bucket *bucket,
297 struct ocfs2_xattr_bucket *bucket,
318 struct ocfs2_xattr_bucket *bucket; in ocfs2_xattr_bucket_new() local
323 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); in ocfs2_xattr_bucket_new()
324 if (bucket) { in ocfs2_xattr_bucket_new()
325 bucket->bu_inode = inode; in ocfs2_xattr_bucket_new()
326 bucket->bu_blocks = blks; in ocfs2_xattr_bucket_new()
329 return bucket; in ocfs2_xattr_bucket_new()
332 static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) in ocfs2_xattr_bucket_relse() argument
[all …]
/linux-6.6.21/drivers/md/bcache/
alloc.c
76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) in bch_inc_gen()
89 struct bucket *b; in bch_rescale_priorities()
125 static inline bool can_inc_bucket_gen(struct bucket *b) in can_inc_bucket_gen()
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket()
140 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket()
153 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket()
181 struct bucket *b; in invalidate_buckets_lru()
218 struct bucket *b; in invalidate_buckets_fifo()
241 struct bucket *b; in invalidate_buckets_random()
302 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
[all …]
/linux-6.6.21/drivers/md/
dm-clone-target.c
564 #define bucket_lock_irqsave(bucket, flags) \ argument
565 spin_lock_irqsave(&(bucket)->lock, flags)
567 #define bucket_unlock_irqrestore(bucket, flags) \ argument
568 spin_unlock_irqrestore(&(bucket)->lock, flags)
570 #define bucket_lock_irq(bucket) \ argument
571 spin_lock_irq(&(bucket)->lock)
573 #define bucket_unlock_irq(bucket) \ argument
574 spin_unlock_irq(&(bucket)->lock)
579 struct hash_table_bucket *bucket; in hash_table_init() local
588 bucket = clone->ht + i; in hash_table_init()
[all …]
/linux-6.6.21/net/rxrpc/
proc.c
245 unsigned int bucket, n; in rxrpc_peer_seq_start() local
255 bucket = *_pos >> shift; in rxrpc_peer_seq_start()
257 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { in rxrpc_peer_seq_start()
262 if (bucket == 0) in rxrpc_peer_seq_start()
268 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); in rxrpc_peer_seq_start()
271 bucket++; in rxrpc_peer_seq_start()
273 *_pos = (bucket << shift) | n; in rxrpc_peer_seq_start()
280 unsigned int bucket, n; in rxrpc_peer_seq_next() local
287 bucket = *_pos >> shift; in rxrpc_peer_seq_next()
289 p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos); in rxrpc_peer_seq_next()
[all …]
/linux-6.6.21/Documentation/networking/
nexthop-group-resilient.rst
49 to choose a hash table bucket, then reads the next hop that this bucket
83 cause bucket allocation change, the wants counts for individual next hops
91 Each bucket maintains a last-used timer. Every time a packet is forwarded
92 through a bucket, this timer is updated to current jiffies value. One
94 amount of time that a bucket must not be hit by traffic in order for it to
104 upkeep changes the next hop that the bucket references to one of the
135 - Single-bucket notifications of the type
143 Some single-bucket notifications are forced, as indicated by the "force"
145 hop associated with the bucket was removed, and the bucket really must be
150 bucket should be migrated, but the HW discovers that the bucket has in fact
[all …]
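
The resilient nexthop group notes above revolve around a per-bucket last-used timer: each forwarded packet stamps the bucket with the current jiffies value, and the bucket only becomes eligible for migration to another next hop after it has gone unhit for the configured idle timer. A minimal kernel-style sketch of that bookkeeping (the struct and function names here are hypothetical, not the kernel's actual implementation):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical bucket record: remembers when traffic last hit the bucket. */
struct demo_res_bucket {
	unsigned long last_used;	/* jiffies at the last forwarded packet */
	u32 nh_id;			/* next hop currently bound to this bucket */
};

/* Called for every packet forwarded through the bucket. */
static inline void demo_res_bucket_hit(struct demo_res_bucket *b)
{
	b->last_used = jiffies;
}

/* The bucket may be migrated only after idle_timer jiffies without traffic. */
static inline bool demo_res_bucket_idle(const struct demo_res_bucket *b,
					unsigned long idle_timer)
{
	return time_after_eq(jiffies, b->last_used + idle_timer);
}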
/linux-6.6.21/include/trace/events/
bcache.h
68 __field(size_t, bucket )
72 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
75 TP_printk("bucket %zu", __entry->bucket)
267 __field(size_t, bucket )
273 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
279 __entry->bucket, __entry->block, __entry->keys)
370 __field(size_t, bucket )
375 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
379 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
429 TP_PROTO(struct cache *ca, size_t bucket),
[all …]
/linux-6.6.21/kernel/bpf/
stackmap.c
214 struct stack_map_bucket *bucket, *new_bucket, *old_bucket; in __bpf_get_stackid() local
230 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
232 hash_matches = bucket && bucket->hash == hash; in __bpf_get_stackid()
248 if (hash_matches && bucket->nr == trace_nr && in __bpf_get_stackid()
249 memcmp(bucket->data, new_bucket->data, trace_len) == 0) { in __bpf_get_stackid()
253 if (bucket && !(flags & BPF_F_REUSE_STACKID)) { in __bpf_get_stackid()
258 if (hash_matches && bucket->nr == trace_nr && in __bpf_get_stackid()
259 memcmp(bucket->data, ips, trace_len) == 0) in __bpf_get_stackid()
261 if (bucket && !(flags & BPF_F_REUSE_STACKID)) in __bpf_get_stackid()
578 struct stack_map_bucket *bucket, *old_bucket; in bpf_stackmap_copy() local
[all …]
/linux-6.6.21/lib/
stackdepot.c
343 static inline struct stack_record *find_stack(struct stack_record *bucket, in find_stack() argument
349 for (found = bucket; found; found = found->next) { in find_stack()
362 struct stack_record *found = NULL, **bucket; in __stack_depot_save() local
383 bucket = &stack_table[hash & stack_hash_mask]; in __stack_depot_save()
390 found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash); in __stack_depot_save()
417 found = find_stack(*bucket, entries, nr_entries, hash); in __stack_depot_save()
423 new->next = *bucket; in __stack_depot_save()
428 smp_store_release(bucket, new); in __stack_depot_save()
/linux-6.6.21/arch/sparc/kernel/
irq_64.c
207 struct ino_bucket bucket; member
258 struct ino_bucket *bucket; in cookie_exists() local
269 bucket = (struct ino_bucket *) __va(cookie); in cookie_exists()
270 irq = bucket->__irq; in cookie_exists()
279 struct ino_bucket *bucket; in sysino_exists() local
282 bucket = &ivector_table[sysino]; in sysino_exists()
283 irq = bucket_get_irq(__pa(bucket)); in sysino_exists()
616 struct ino_bucket *bucket; in build_irq() local
623 bucket = &ivector_table[ino]; in build_irq()
624 irq = bucket_get_irq(__pa(bucket)); in build_irq()
[all …]
/linux-6.6.21/net/atm/
proc.c
69 int bucket; member
78 static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) in __vcc_walk() argument
83 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { in __vcc_walk()
84 struct hlist_head *head = &vcc_hash[*bucket]; in __vcc_walk()
98 if (!sk && ++*bucket < VCC_HTABLE_SIZE) { in __vcc_walk()
99 sk = sk_head(&vcc_hash[*bucket]); in __vcc_walk()
113 return __vcc_walk(&state->sk, family, &state->bucket, l) ? in vcc_walk()
