/linux-6.6.21/fs/hpfs/ |
D | anode.c |
    15  struct bplus_header *btree, unsigned sec,  in hpfs_bplus_lookup() argument
    24  if (bp_internal(btree)) {  in hpfs_bplus_lookup()
    25  for (i = 0; i < btree->n_used_nodes; i++)  in hpfs_bplus_lookup()
    26  if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {  in hpfs_bplus_lookup()
    27  a = le32_to_cpu(btree->u.internal[i].down);  in hpfs_bplus_lookup()
    30  btree = &anode->btree;  in hpfs_bplus_lookup()
    37  for (i = 0; i < btree->n_used_nodes; i++)  in hpfs_bplus_lookup()
    38  if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&  in hpfs_bplus_lookup()
    39  … le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {  in hpfs_bplus_lookup()
    40  …a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_sec…  in hpfs_bplus_lookup()
    [all …]
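
The hits above cover both halves of hpfs_bplus_lookup(): internal nodes are scanned for the first entry whose file_secno exceeds the requested sector and its down pointer is followed (lines 26-27), while a leaf entry maps a file sector into an extent run by offset arithmetic (lines 38-40). A minimal, self-contained sketch of that leaf-level arithmetic; the struct and function names are illustrative stand-ins, not the kernel's bplus structures, and endianness handling is omitted:

    #include <stdint.h>
    #include <stddef.h>

    struct run {                    /* hypothetical stand-in for one external (leaf) entry */
            uint32_t file_secno;    /* first file sector covered by the run */
            uint32_t length;        /* number of sectors in the run */
            uint32_t disk_secno;    /* first disk sector of the run */
    };

    /* Return the disk sector backing file sector @sec, or 0 if no run covers it. */
    static uint32_t map_file_sector(const struct run *runs, size_t n, uint32_t sec)
    {
            for (size_t i = 0; i < n; i++) {
                    if (runs[i].file_secno <= sec &&
                        runs[i].file_secno + runs[i].length > sec)
                            return runs[i].disk_secno + (sec - runs[i].file_secno);
            }
            return 0;       /* not mapped */
    }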
|
D | map.c |
    180  if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=  in hpfs_map_fnode()
    181  (bp_internal(&fnode->btree) ? 12 : 8)) {  in hpfs_map_fnode()
    187  if (le16_to_cpu(fnode->btree.first_free) !=  in hpfs_map_fnode()
    188  8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {  in hpfs_map_fnode()
    235  if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=  in hpfs_map_anode()
    236  (bp_internal(&anode->btree) ? 60 : 40)) {  in hpfs_map_anode()
    240  if (le16_to_cpu(anode->btree.first_free) !=  in hpfs_map_anode()
    241  8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {  in hpfs_map_anode()
|
D | alloc.c |
    466  f->btree.n_free_nodes = 8;  in hpfs_alloc_fnode()
    467  f->btree.first_free = cpu_to_le16(8);  in hpfs_alloc_fnode()
    483  a->btree.n_free_nodes = 40;  in hpfs_alloc_anode()
    484  a->btree.n_used_nodes = 0;  in hpfs_alloc_anode()
    485  a->btree.first_free = cpu_to_le16(8);  in hpfs_alloc_anode()
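
The map.c and alloc.c hits above encode one invariant: a bplus header has a fixed number of entry slots (12 internal or 8 external in an fnode, 60 or 40 in an anode), internal entries are 8 bytes and external entries 12 bytes, and first_free must point just past the used entries, which start at offset 8. A hedged sketch of that consistency check, using illustrative field names rather than the on-disk bplus_header layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct bplus_hdr {              /* illustrative stand-in for struct bplus_header */
            bool     internal;      /* internal (down-pointer) entries vs. external (extent) entries */
            uint8_t  n_free_nodes;
            uint8_t  n_used_nodes;
            uint16_t first_free;    /* byte offset of the first free entry slot */
    };

    static bool bplus_hdr_sane(const struct bplus_hdr *h, unsigned internal_slots,
                               unsigned external_slots)
    {
            unsigned slots = h->internal ? internal_slots : external_slots;
            unsigned entry_size = h->internal ? 8 : 12;

            if ((unsigned)h->n_used_nodes + h->n_free_nodes != slots)
                    return false;   /* every slot must be either used or free */
            /* entries start at offset 8; the free area begins right after them */
            return h->first_free == 8 + h->n_used_nodes * entry_size;
    }

A freshly allocated fnode (lines 466-467) starts in the external case with n_used_nodes = 0, n_free_nodes = 8 and first_free = 8, which satisfies bplus_hdr_sane(h, 12, 8); an anode starts with 40 free slots (line 483) and would be checked with bplus_hdr_sane(h, 60, 40).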
|
/linux-6.6.21/fs/nilfs2/ |
D | btree.c |
    58  static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,  in nilfs_btree_get_new_block() argument
    61  struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;  in nilfs_btree_get_new_block()
    112  static int nilfs_btree_node_size(const struct nilfs_bmap *btree)  in nilfs_btree_node_size() argument
    114  return i_blocksize(btree->b_inode);  in nilfs_btree_node_size()
    117  static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)  in nilfs_btree_nchildren_per_block() argument
    119  return btree->b_nchildren_per_block;  in nilfs_btree_nchildren_per_block()
    410  nilfs_btree_get_root(const struct nilfs_bmap *btree)  in nilfs_btree_get_root() argument
    412  return (struct nilfs_btree_node *)btree->b_u.u_data;  in nilfs_btree_get_root()
    427  static int nilfs_btree_height(const struct nilfs_bmap *btree)  in nilfs_btree_height() argument
    429  return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;  in nilfs_btree_height()
    [all …]
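
Two derived quantities stand out in these accessors: the node size is simply the bmap inode's block size (line 114), and the tree height is computed as the root node's level plus one (line 429). A self-contained sketch of the same two computations; the header and key/pointer sizes below are illustrative assumptions, not the nilfs2 on-disk format:

    #include <stddef.h>

    struct demo_node {          /* hypothetical in-memory node header */
            int level;          /* 0 for the lowest level in this toy model */
            int nchildren;
    };

    /* fan-out: how many (key, pointer) pairs fit after the node header */
    static int demo_nchildren_per_block(size_t block_size)
    {
            const size_t header = 16, key = 8, ptr = 8;     /* illustrative sizes */
            return (int)((block_size - header) / (key + ptr));
    }

    /* a tree whose root sits at level N has N + 1 levels in total */
    static int demo_height(const struct demo_node *root)
    {
            return root->level + 1;
    }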
|
D | Makefile | 4 btnode.o bmap.o btree.o direct.o dat.o recovery.o \
|
/linux-6.6.21/drivers/md/bcache/ |
D | btree.h |
    117  struct btree {  struct
    127  struct btree *parent;  argument
    152  static inline bool btree_node_ ## flag(struct btree *b) \  argument
    155  static inline void set_btree_node_ ## flag(struct btree *b) \
    170  static inline struct btree_write *btree_current_write(struct btree *b)  in btree_current_write()
    175  static inline struct btree_write *btree_prev_write(struct btree *b)  in btree_prev_write()
    180  static inline struct bset *btree_bset_first(struct btree *b)  in btree_bset_first()
    185  static inline struct bset *btree_bset_last(struct btree *b)  in btree_bset_last()
    190  static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)  in bset_block_offset()
    248  static inline void rw_lock(bool w, struct btree *b, int level)  in rw_lock()
    [all …]
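
Lines 152 and 155 are fragments of a macro that token-pastes a flag name into a matching pair of static inline helpers, one to test the flag and one to set it. A generic, self-contained sketch of that pattern; the flag names, field layout and plain bit operations here are illustrative, and the real bcache helpers differ in detail:

    #include <stdbool.h>

    struct node {
            unsigned long flags;
    };

    enum { NODE_dirty, NODE_io_error };     /* illustrative flag bits */

    /* one macro expands to a test helper and a set helper per flag */
    #define NODE_FLAG(flag)                                                 \
    static inline bool node_ ## flag(const struct node *n)                  \
    {                                                                       \
            return n->flags & (1UL << NODE_ ## flag);                       \
    }                                                                       \
    static inline void set_node_ ## flag(struct node *n)                    \
    {                                                                       \
            n->flags |= 1UL << NODE_ ## flag;                               \
    }

    NODE_FLAG(dirty)        /* defines node_dirty() and set_node_dirty() */
    NODE_FLAG(io_error)     /* defines node_io_error() and set_node_io_error() */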
|
D | btree.c |
    107  static inline struct bset *write_block(struct btree *b)  in write_block()
    112  static void bch_btree_init_next(struct btree *b)  in bch_btree_init_next()
    139  static uint64_t btree_csum_set(struct btree *b, struct bset *i)  in btree_csum_set()
    148  void bch_btree_node_read_done(struct btree *b)  in bch_btree_node_read_done()
    243  static void bch_btree_node_read(struct btree *b)  in bch_btree_node_read()
    281  static void btree_complete_write(struct btree *b, struct btree_write *w)  in btree_complete_write()
    298  struct btree *b = container_of(cl, struct btree, io);  in btree_node_write_unlock()
    305  struct btree *b = container_of(cl, struct btree, io);  in __btree_node_write_done()
    320  struct btree *b = container_of(cl, struct btree, io);  in btree_node_write_done()
    329  struct btree *b = container_of(cl, struct btree, io);  in btree_node_write_endio()
    [all …]
|
D | extents.c |
    128  struct btree *b = container_of(keys, struct btree, keys);  in bch_bkey_dump()
    168  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_invalid()
    173  static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)  in btree_ptr_bad_expensive()
    207  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_bad()
    232  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_insert_fixup()
    328  struct cache_set *c = container_of(b, struct btree, keys)->c;  in bch_extent_insert_fixup()
    502  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_invalid()
    507  static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,  in bch_extent_bad_expensive()
    539  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_bad()
    585  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_merge()
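
Nearly every hit in btree.c and extents.c is the same idiom: container_of() recovers the enclosing struct btree from a pointer to one of its embedded members (the io closure in btree.c, the btree_keys member here). A self-contained illustration with stand-in types; the simplified container_of below drops the type checking the kernel macro performs:

    #include <stddef.h>

    /* simplified: the real kernel container_of() also type-checks the member */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct keyset {                 /* stand-in for the embedded member */
            int nkeys;
    };

    struct node {                   /* stand-in for the enclosing object */
            int level;
            struct keyset keys;     /* embedded, not a pointer */
    };

    /* callers are handed a struct keyset *, but need the whole node */
    static int node_level_from_keys(struct keyset *ks)
    {
            struct node *n = container_of(ks, struct node, keys);
            return n->level;
    }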
|
D | debug.h |
    11  void bch_btree_verify(struct btree *b);
    20  static inline void bch_btree_verify(struct btree *b) {}  in bch_btree_verify()
|
D | Makefile | 5 bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
|
D | bcache.h |
    222  struct btree;
    277  int (*cache_miss)(struct btree *b, struct search *s,
    668  struct btree *root;
    671  struct btree *verify_data;
|
/linux-6.6.21/fs/xfs/libxfs/ |
D | xfs_da_btree.c |
    152  to->btree = from3->__btree;  in xfs_da3_node_hdr_from_disk()
    160  to->btree = from->__btree;  in xfs_da3_node_hdr_from_disk()
    655  struct xfs_da_node_entry *btree;  in xfs_da3_root_split() local
    690  btree = icnodehdr.btree;  in xfs_da3_root_split()
    691  size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);  in xfs_da3_root_split()
    749  btree = nodehdr.btree;  in xfs_da3_root_split()
    750  btree[0].hashval = cpu_to_be32(blk1->hashval);  in xfs_da3_root_split()
    751  btree[0].before = cpu_to_be32(blk1->blkno);  in xfs_da3_root_split()
    752  btree[1].hashval = cpu_to_be32(blk2->hashval);  in xfs_da3_root_split()
    753  btree[1].before = cpu_to_be32(blk2->blkno);  in xfs_da3_root_split()
    [all …]
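
Lines 690-691 compute how many bytes of the old root are live (its header plus count entries), and lines 750-753 show the new root after a split holding exactly two entries, each pairing a child's highest hash with its block number. A simplified sketch of those two steps; the types are stand-ins for xfs_da_node_entry and the big-endian conversion is omitted:

    #include <stdint.h>
    #include <stddef.h>

    struct node_entry {             /* stand-in for struct xfs_da_node_entry */
            uint32_t hashval;       /* highest hash reachable through this child */
            uint32_t before;        /* block number of the child */
    };

    struct node_hdr {               /* stand-in for the node header */
            uint16_t count;
            struct node_entry entries[];
    };

    /* bytes occupied by a node's header plus its entry array */
    static size_t node_bytes(const struct node_hdr *hdr)
    {
            return (size_t)((const char *)&hdr->entries[hdr->count] -
                            (const char *)hdr);
    }

    /* after a split, the new root points at the two resulting children */
    static void init_new_root(struct node_entry root[2],
                              uint32_t hash1, uint32_t blk1,
                              uint32_t hash2, uint32_t blk2)
    {
            root[0].hashval = hash1;
            root[0].before  = blk1;
            root[1].hashval = hash2;
            root[1].before  = blk2;
    }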
|
/linux-6.6.21/Documentation/admin-guide/device-mapper/ |
D | persistent-data.rst |
    14  - Another btree-based caching target posted to dm-devel
    72  dm-btree.[hc]
    73  dm-btree-remove.c
    74  dm-btree-spine.c
    75  dm-btree-internal.h
    77  Currently there is only one data structure, a hierarchical btree.
    81  The btree is 'hierarchical' in that you can define it to be composed
    83  thin-provisioning target uses a btree with two levels of nesting.
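
Lines 77-83 describe dm's 'hierarchical' btree: the value stored at one level can be the root of another btree, so a lookup takes one key per nesting level (line 83: the thin-provisioning target nests two levels). A conceptual, in-memory sketch of such a two-level lookup; the flat arrays below are a deliberately simple stand-in for the real on-disk dm-btree and its API:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* one "level": a (key, value) array standing in for a single btree */
    struct level {
            const uint64_t *keys;
            const uint64_t *values;
            size_t n;
    };

    static bool level_lookup(const struct level *t, uint64_t key, uint64_t *out)
    {
            for (size_t i = 0; i < t->n; i++) {
                    if (t->keys[i] == key) {
                            *out = t->values[i];
                            return true;
                    }
            }
            return false;
    }

    /*
     * Two-level lookup: keys[0] selects an inner tree (here, an index into a
     * table of levels) and keys[1] selects the record inside it -- the
     * "btree whose values are roots of other btrees" idea from the text.
     */
    static bool nested_lookup(const struct level *outer,
                              const struct level *inner_table,
                              const uint64_t keys[2], uint64_t *out)
    {
            uint64_t inner_idx;

            if (!level_lookup(outer, keys[0], &inner_idx))
                    return false;
            return level_lookup(&inner_table[inner_idx], keys[1], out);
    }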
|
/linux-6.6.21/drivers/md/persistent-data/ |
D | Makefile |
    11  dm-btree.o \
    12  dm-btree-remove.o \
    13  dm-btree-spine.o
|
/linux-6.6.21/include/trace/events/ |
D | bcache.h |
    64  TP_PROTO(struct btree *b),
    258  TP_PROTO(struct btree *b),
    263  TP_PROTO(struct btree *b),
    283  TP_PROTO(struct btree *b),
    293  TP_PROTO(struct btree *b),
    333  TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
    366  TP_PROTO(struct btree *b, unsigned keys),
    383  TP_PROTO(struct btree *b, unsigned keys),
    388  TP_PROTO(struct btree *b, unsigned keys),
    393  TP_PROTO(struct btree *b),
|
/linux-6.6.21/rust/alloc/collections/ |
D | mod.rs |
    10  mod btree;  module
    21  pub use super::btree::map::*;
    29  pub use super::btree::set::*;
|
/linux-6.6.21/fs/xfs/ |
D | xfs_attr_list.c |
    208  struct xfs_da_node_entry *btree;  in xfs_attr_node_list_lookup() local
    251  btree = nodehdr.btree;  in xfs_attr_node_list_lookup()
    252  for (i = 0; i < nodehdr.count; btree++, i++) {  in xfs_attr_node_list_lookup()
    253  if (cursor->hashval <= be32_to_cpu(btree->hashval)) {  in xfs_attr_node_list_lookup()
    254  cursor->blkno = be32_to_cpu(btree->before);  in xfs_attr_node_list_lookup()
    256  btree);  in xfs_attr_node_list_lookup()
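
Lines 252-254 are the descent step: the node's entries are scanned in hash order and the first child whose hashval is greater than or equal to the cursor's hash is followed via its before block number. A minimal sketch of that selection, with a simplified stand-in for xfs_da_node_entry and no endianness conversion:

    #include <stdint.h>
    #include <stddef.h>

    struct entry {
            uint32_t hashval;       /* highest hash reachable through this child */
            uint32_t before;        /* child block number */
    };

    /* return the child block to descend into, or 0 if hash is past every entry */
    static uint32_t pick_child(const struct entry *e, size_t count, uint32_t hash)
    {
            for (size_t i = 0; i < count; i++) {
                    if (hash <= e[i].hashval)
                            return e[i].before;
            }
            return 0;
    }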
|
D | xfs_attr_inactive.c |
    159  child_fsb = be32_to_cpu(ichdr.btree[0].before);  in xfs_attr3_node_inactive()
    231  child_fsb = be32_to_cpu(phdr.btree[i + 1].before);  in xfs_attr3_node_inactive()
|
/linux-6.6.21/fs/befs/ |
D | ChangeLog |
    27  * Did the string comparison really right this time (btree.c) [WD]
    30  a pointer value. (btree.c) [WD]
    38  keys within btree nodes, rather than the linear search we were using
    39  before. (btree.c) [Sergey S. Kostyliov <rathamahata@php4.ru>]
    56  (btree.c) [WD]
    105  * Removed notion of btree handle from btree.c. It was unnecessary, as the
    128  (btree.c) [WD]
    133  seekleaf() in btree.c [WD]
    148  (datastream.c, btree.c super.c inode.c) [WD]
    253  * Fix bug with reading an empty directory. (btree.c and dir.c)
    [all …]
|
D | Makefile | 8 befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o
|
/linux-6.6.21/Documentation/filesystems/ |
D | xfs-online-fsck-design.rst |
    275  If corruption is found in the inode header or inode btree and ``xfs_scrub``
    992  Second, the different ondisk storage format of the reverse mapping btree
    1032  is this an attribute fork extent? A file mapping btree extent? Or an
    1061  btree block requires locking the file and searching the entire btree to
    1168  Btree records spanning an interval of the btree keyspace are checked for
    1207  The XFS btree code has keyspace scanning functions that online fsck uses to
    1211  For the reverse mapping btree, it is possible to mask parts of the key for the
    1213  btree contains records mapping a certain extent of physical space without the
    1228  - Do node pointers within the btree point to valid block addresses for the type
    1229  of btree?
    [all …]
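
Line 1168 mentions that btree records spanning an interval of the keyspace are checked for overlap. A minimal sketch of that kind of check for records kept sorted by starting key; the record layout here is illustrative, not the XFS btree record format:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct rec {
            uint64_t start;         /* first unit of keyspace covered */
            uint64_t len;           /* number of units covered */
    };

    /*
     * With records sorted by start, two neighbours overlap when the next one
     * begins before the previous one ends.
     */
    static bool records_overlap(const struct rec *r, size_t n)
    {
            for (size_t i = 1; i < n; i++) {
                    if (r[i].start < r[i - 1].start + r[i - 1].len)
                            return true;
            }
            return false;
    }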
|
/linux-6.6.21/fs/xfs/scrub/ |
D | dabtree.c |
    92  return hdr.btree + blk->index;  in xchk_da_btree_node_entry()
    319  struct xfs_da_node_entry *btree;  in xchk_da_btree_block() local
    422  btree = nodehdr.btree;  in xchk_da_btree_block()
    424  blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);  in xchk_da_btree_block()
|
/linux-6.6.21/Documentation/admin-guide/ |
D | bcache.rst |
    18  in erase block sized buckets, and it uses a hybrid btree/log to track cached
    388  the way cache coherency is handled for cache misses. If a btree node is full,
    393  cause the btree node to be split, and you need almost no write traffic for
    394  this to not show up enough to be noticeable (especially since bcache's btree
    517  Average data per key in the btree.
    526  Amount of memory currently used by the btree cache
    560  Percentage of the root btree node in use. If this gets too high the node
    568  Depth of the btree (A single node btree has depth 0).
    579  duration: garbage collection, btree read, btree node sorts and btree splits.
    585  Total nodes in the btree.
    [all …]
|
/linux-6.6.21/fs/hfs/ |
D | Makefile | 8 hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
|
/linux-6.6.21/fs/hfsplus/ |
D | Makefile | 8 hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
|