
Searched refs:rbnode (Results 1 – 16 of 16) sorted by relevance

/linux-6.1.9/drivers/base/regmap/
regcache-rbtree.c
41 struct regcache_rbtree_node *rbnode, in regcache_rbtree_get_base_top_reg() argument
44 *base = rbnode->base_reg; in regcache_rbtree_get_base_top_reg()
45 *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride); in regcache_rbtree_get_base_top_reg()
49 struct regcache_rbtree_node *rbnode, unsigned int idx) in regcache_rbtree_get_register() argument
51 return regcache_get_val(map, rbnode->block, idx); in regcache_rbtree_get_register()
55 struct regcache_rbtree_node *rbnode, in regcache_rbtree_set_register() argument
58 set_bit(idx, rbnode->cache_present); in regcache_rbtree_set_register()
59 regcache_set_val(map, rbnode->block, idx, val); in regcache_rbtree_set_register()
67 struct regcache_rbtree_node *rbnode; in regcache_rbtree_lookup() local
70 rbnode = rbtree_ctx->cached_rbnode; in regcache_rbtree_lookup()
[all …]
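
These regmap cache hits exercise the kernel's generic rbtree API from <linux/rbtree.h>: regcache_rbtree_lookup() descends from the root and recovers the containing entry with rb_entry(). A minimal sketch of that lookup idiom, using a hypothetical struct my_node rather than the regmap types:

    #include <linux/rbtree.h>

    struct my_node {
        struct rb_node node;    /* embedded rbtree linkage */
        unsigned int key;
    };

    /* Descend from the root comparing keys; return the match or NULL. */
    static struct my_node *my_lookup(struct rb_root *root, unsigned int key)
    {
        struct rb_node *n = root->rb_node;

        while (n) {
            struct my_node *e = rb_entry(n, struct my_node, node);

            if (key < e->key)
                n = n->rb_left;
            else if (key > e->key)
                n = n->rb_right;
            else
                return e;
        }
        return NULL;
    }
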
/linux-6.1.9/net/ipv4/
inet_fragment.c
48 RB_CLEAR_NODE(&skb->rbnode); in fragcb_clear()
71 rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, in fragrun_create()
72 &q->last_run_head->rbnode.rb_right); in fragrun_create()
74 rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); in fragrun_create()
75 rb_insert_color(&skb->rbnode, &q->rb_fragments); in fragrun_create()
269 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in inet_frag_rbtree_purge()
272 rb_erase(&skb->rbnode, root); in inet_frag_rbtree_purge()
431 rb_link_node(&skb->rbnode, parent, rbn); in inet_frag_queue_insert()
432 rb_insert_color(&skb->rbnode, &q->rb_fragments); in inet_frag_queue_insert()
453 if (RB_EMPTY_NODE(&skb->rbnode)) in inet_frag_reasm_prepare()
[all …]
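
fragcb_clear() and inet_frag_reasm_prepare() above lean on RB_CLEAR_NODE()/RB_EMPTY_NODE() so an skb can record whether it is currently linked into the fragment tree. The same idiom, sketched with the hypothetical struct my_node from the first example:

    /* Mark the node as unlinked so RB_EMPTY_NODE() is a valid test later. */
    static void my_init(struct my_node *e)
    {
        RB_CLEAR_NODE(&e->node);
    }

    /* Erase only if the entry is actually in the tree. */
    static void my_unlink(struct my_node *e, struct rb_root *root)
    {
        if (!RB_EMPTY_NODE(&e->node)) {
            rb_erase(&e->node, root);
            RB_CLEAR_NODE(&e->node);
        }
    }
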
tcp_input.c
4741 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
4821 rb_link_node(&skb->rbnode, NULL, p); in tcp_data_queue_ofo()
4822 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4844 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
4876 rb_replace_node(&skb1->rbnode, &skb->rbnode, in tcp_data_queue_ofo()
4895 rb_link_node(&skb->rbnode, parent, p); in tcp_data_queue_ofo()
4896 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4908 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5140 rb_erase(&skb->rbnode, root); in tcp_collapse_one()
5163 rb_link_node(&skb->rbnode, parent, p); in tcp_rbtree_insert()
[all …]
/linux-6.1.9/drivers/infiniband/hw/hfi1/
mmu_rb.c
78 struct mmu_rb_node *rbnode; in hfi1_mmu_rb_unregister() local
99 rbnode = rb_entry(node, struct mmu_rb_node, node); in hfi1_mmu_rb_unregister()
102 list_move(&rbnode->list, &del_list); in hfi1_mmu_rb_unregister()
198 struct mmu_rb_node *rbnode, *ptr; in hfi1_mmu_rb_evict() local
209 list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list, in hfi1_mmu_rb_evict()
211 if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg, in hfi1_mmu_rb_evict()
213 __mmu_int_rb_remove(rbnode, &handler->root); in hfi1_mmu_rb_evict()
215 list_move(&rbnode->list, &del_list); in hfi1_mmu_rb_evict()
223 rbnode = list_first_entry(&del_list, struct mmu_rb_node, list); in hfi1_mmu_rb_evict()
224 list_del(&rbnode->list); in hfi1_mmu_rb_evict()
[all …]
/linux-6.1.9/net/netfilter/
nf_conncount.c
320 struct rb_node **rbnode, *parent; in insert_tree() local
330 rbnode = &(root->rb_node); in insert_tree()
331 while (*rbnode) { in insert_tree()
333 rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); in insert_tree()
335 parent = *rbnode; in insert_tree()
338 rbnode = &((*rbnode)->rb_left); in insert_tree()
340 rbnode = &((*rbnode)->rb_right); in insert_tree()
388 rb_link_node_rcu(&rbconn->node, parent, rbnode); in insert_tree()
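
insert_tree() above is the canonical two-step rbtree insert: walk down remembering the parent and the link slot, then attach with rb_link_node() and rebalance with rb_insert_color() (rb_link_node_rcu() is the variant that publishes the link safely for lockless RCU readers). A minimal sketch, again with the hypothetical struct my_node:

    static void my_insert(struct rb_root *root, struct my_node *new)
    {
        struct rb_node **link = &root->rb_node, *parent = NULL;

        /* Find the leaf slot where the new key belongs. */
        while (*link) {
            struct my_node *e = rb_entry(*link, struct my_node, node);

            parent = *link;
            if (new->key < e->key)
                link = &(*link)->rb_left;
            else
                link = &(*link)->rb_right;
        }

        /* Link the new node, then recolour/rebalance the tree. */
        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
    }
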
/linux-6.1.9/net/sched/
sch_etf.c
188 rb_link_node(&nskb->rbnode, parent, p); in etf_enqueue_timesortedlist()
189 rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); in etf_enqueue_timesortedlist()
211 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_drop()
235 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_remove()
427 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_clear()
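
sch_etf keeps its time-sorted queue in an rb_root_cached, which tracks the leftmost (earliest) node so it can be fetched without walking the tree; the inserter tells rb_insert_color_cached() whether the new node became that leftmost entry. A hedged sketch of the cached variant, same hypothetical type:

    static void my_insert_cached(struct rb_root_cached *root, struct my_node *new)
    {
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
            struct my_node *e = rb_entry(*link, struct my_node, node);

            parent = *link;
            if (new->key < e->key) {
                link = &(*link)->rb_left;
            } else {
                link = &(*link)->rb_right;
                leftmost = false;       /* went right at least once */
            }
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color_cached(&new->node, root, leftmost);
        /* rb_first_cached(root) now returns the smallest key in O(1). */
    }
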
sch_fq.c
386 rb_erase(&skb->rbnode, &flow->t_root); in fq_erase_head()
432 rb_link_node(&skb->rbnode, parent, p); in flow_queue_add()
433 rb_insert_color(&skb->rbnode, &flow->t_root); in flow_queue_add()
654 rb_erase(&skb->rbnode, &flow->t_root); in fq_flow_purge()
sch_netem.c
369 rb_erase(&skb->rbnode, &q->t_root); in tfifo_reset()
402 rb_link_node(&nskb->rbnode, parent, p); in tfifo_enqueue()
403 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
672 rb_erase(&skb->rbnode, &q->t_root); in netem_erase_head()
/linux-6.1.9/mm/
zswap.c
182 struct rb_node rbnode; member
293 RB_CLEAR_NODE(&entry->rbnode); in zswap_entry_cache_alloc()
311 entry = rb_entry(node, struct zswap_entry, rbnode); in zswap_rb_search()
334 myentry = rb_entry(parent, struct zswap_entry, rbnode); in zswap_rb_insert()
344 rb_link_node(&entry->rbnode, parent, link); in zswap_rb_insert()
345 rb_insert_color(&entry->rbnode, root); in zswap_rb_insert()
351 if (!RB_EMPTY_NODE(&entry->rbnode)) { in zswap_rb_erase()
352 rb_erase(&entry->rbnode, root); in zswap_rb_erase()
353 RB_CLEAR_NODE(&entry->rbnode); in zswap_rb_erase()
1395 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) in zswap_frontswap_invalidate_area()
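
zswap pairs the usual search/insert/erase helpers with rbtree_postorder_for_each_entry_safe() for teardown: the post-order walk visits children before their parent, so each entry can be freed without any further rebalancing. A sketch, assuming kmalloc()-allocated entries of the hypothetical struct my_node:

    #include <linux/slab.h>

    static void my_destroy_all(struct rb_root *root)
    {
        struct my_node *e, *tmp;

        /* Children are visited before their parent, so freeing 'e'
         * never touches a node the walk still needs. */
        rbtree_postorder_for_each_entry_safe(e, tmp, root, node)
            kfree(e);

        *root = RB_ROOT;    /* the walk leaves the root stale; reset it */
    }
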
/linux-6.1.9/fs/btrfs/
backref.c
243 ref = rb_entry(parent, struct prelim_ref, rbnode); in prelim_ref_insert()
279 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
280 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
292 &preftree->root.rb_root, rbnode) { in prelim_release()
406 ref = rb_entry(parent, struct prelim_ref, rbnode); in is_shared_data_backref()
711 ref = rb_entry(rnode, struct prelim_ref, rbnode); in resolve_indirect_refs()
718 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
803 ref = rb_entry(node, struct prelim_ref, rbnode); in add_missing_keys()
1342 ref = rb_entry(node, struct prelim_ref, rbnode); in find_parent_nodes()
1343 node = rb_next(&ref->rbnode); in find_parent_nodes()
backref.h
89 struct rb_node rbnode; member
/linux-6.1.9/net/mptcp/
protocol.c
242 rb_link_node(&skb->rbnode, NULL, p); in mptcp_data_queue_ofo()
243 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
260 parent = &msk->ooo_last_skb->rbnode; in mptcp_data_queue_ofo()
291 rb_replace_node(&skb1->rbnode, &skb->rbnode, in mptcp_data_queue_ofo()
306 rb_link_node(&skb->rbnode, parent, p); in mptcp_data_queue_ofo()
307 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
314 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
737 rb_erase(&skb->rbnode, &msk->out_of_order_queue); in __mptcp_ofo_queue()
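
Like the TCP out-of-order queue above, mptcp_data_queue_ofo() calls rb_replace_node() when a newly arrived skb completely covers an existing one: the victim's position and colour are handed to the replacement, so no rebalancing takes place. Sketch with the hypothetical struct my_node:

    /* 'new' must sort to the same position as 'victim'. */
    static void my_replace(struct rb_root *root, struct my_node *victim,
                           struct my_node *new)
    {
        rb_replace_node(&victim->node, &new->node, root);
        RB_CLEAR_NODE(&victim->node);   /* mark the evicted entry unlinked */
    }
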
/linux-6.1.9/include/linux/
skbuff.h
859 struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ member
3816 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3819 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3820 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
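
skbuff.h wraps the raw accessors so callers can iterate sk_buffs directly; rb_entry_safe() simply returns NULL when the rb_node pointer is NULL, which makes the wrappers safe to chain. An equivalent sketch for the hypothetical struct my_node:

    #define rb_to_my(rb)    rb_entry_safe(rb, struct my_node, node)
    #define my_first(root)  rb_to_my(rb_first(root))
    #define my_next(e)      rb_to_my(rb_next(&(e)->node))

    /* In-order walk, smallest key first. */
    static void my_walk(struct rb_root *root)
    {
        struct my_node *e;

        for (e = my_first(root); e; e = my_next(e))
            pr_info("key %u\n", e->key);
    }
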
/linux-6.1.9/Documentation/networking/
rds.rst
318 wraps the raw congestion bitmap, contains rbnode, waitq, etc.
/linux-6.1.9/include/net/
tcp.h
1927 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
/linux-6.1.9/net/core/
skbuff.c
3460 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge()
3463 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
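
skb_rbtree_purge() here, like inet_frag_rbtree_purge() earlier, drains a tree by repeatedly taking rb_first(), erasing it and freeing the entry; unlike the post-order teardown, the tree stays valid after every step. A final sketch with the hypothetical struct my_node:

    static void my_purge(struct rb_root *root)
    {
        struct rb_node *p = rb_first(root);

        while (p) {
            struct my_node *e = rb_entry(p, struct my_node, node);

            p = rb_next(p);             /* advance before erasing */
            rb_erase(&e->node, root);
            kfree(e);
        }
    }
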