Lines matching refs: tree

21 struct hfs_btree *tree; in hfs_btree_open() local
27 tree = kzalloc(sizeof(*tree), GFP_KERNEL); in hfs_btree_open()
28 if (!tree) in hfs_btree_open()
31 mutex_init(&tree->tree_lock); in hfs_btree_open()
32 spin_lock_init(&tree->hash_lock); in hfs_btree_open()
34 tree->sb = sb; in hfs_btree_open()
35 tree->cnid = id; in hfs_btree_open()
36 tree->keycmp = keycmp; in hfs_btree_open()
38 tree->inode = iget_locked(sb, id); in hfs_btree_open()
39 if (!tree->inode) in hfs_btree_open()
41 BUG_ON(!(tree->inode->i_state & I_NEW)); in hfs_btree_open()
44 HFS_I(tree->inode)->flags = 0; in hfs_btree_open()
45 mutex_init(&HFS_I(tree->inode)->extents_lock); in hfs_btree_open()
48 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize, in hfs_btree_open()
50 if (HFS_I(tree->inode)->alloc_blocks > in hfs_btree_open()
51 HFS_I(tree->inode)->first_blocks) { in hfs_btree_open()
53 unlock_new_inode(tree->inode); in hfs_btree_open()
57 tree->inode->i_mapping->a_ops = &hfs_btree_aops; in hfs_btree_open()
60 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize, in hfs_btree_open()
63 if (!HFS_I(tree->inode)->first_blocks) { in hfs_btree_open()
65 unlock_new_inode(tree->inode); in hfs_btree_open()
69 tree->inode->i_mapping->a_ops = &hfs_btree_aops; in hfs_btree_open()
75 unlock_new_inode(tree->inode); in hfs_btree_open()
77 mapping = tree->inode->i_mapping; in hfs_btree_open()
85 tree->root = be32_to_cpu(head->root); in hfs_btree_open()
86 tree->leaf_count = be32_to_cpu(head->leaf_count); in hfs_btree_open()
87 tree->leaf_head = be32_to_cpu(head->leaf_head); in hfs_btree_open()
88 tree->leaf_tail = be32_to_cpu(head->leaf_tail); in hfs_btree_open()
89 tree->node_count = be32_to_cpu(head->node_count); in hfs_btree_open()
90 tree->free_nodes = be32_to_cpu(head->free_nodes); in hfs_btree_open()
91 tree->attributes = be32_to_cpu(head->attributes); in hfs_btree_open()
92 tree->node_size = be16_to_cpu(head->node_size); in hfs_btree_open()
93 tree->max_key_len = be16_to_cpu(head->max_key_len); in hfs_btree_open()
94 tree->depth = be16_to_cpu(head->depth); in hfs_btree_open()
96 size = tree->node_size; in hfs_btree_open()
99 if (!tree->node_count) in hfs_btree_open()
103 if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { in hfs_btree_open()
105 tree->max_key_len); in hfs_btree_open()
110 if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { in hfs_btree_open()
112 tree->max_key_len); in hfs_btree_open()
120 tree->node_size_shift = ffs(size) - 1; in hfs_btree_open()
121 tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; in hfs_btree_open()
125 return tree; in hfs_btree_open()
131 tree->inode->i_mapping->a_ops = &hfs_aops; in hfs_btree_open()
132 iput(tree->inode); in hfs_btree_open()
134 kfree(tree); in hfs_btree_open()
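The hfs_btree_open() references above follow one shape: allocate the struct hfs_btree, initialise its locks, grab the backing inode with iget_locked(), read the extents or catalog fork into it, parse the big-endian header record of node 0 into host-order fields, validate node_count and max_key_len, and derive node_size_shift and pages_per_bnode before returning (the error path at lines 131-134 restores a_ops, drops the inode and frees the tree). Below is a minimal user-space sketch of just the header-parsing and validation step; the struct layouts, field order and PAGE_SHIFT value are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <strings.h>   /* ffs() */
#include <endian.h>    /* be32toh(), be16toh() (glibc) */

#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

struct disk_header {                     /* illustrative on-disk header fields */
        uint16_t depth;
        uint32_t root, leaf_count, leaf_head, leaf_tail;
        uint32_t node_count, free_nodes;
        uint16_t node_size, max_key_len;
        uint32_t attributes;
};

struct tree_info {                       /* host-order copies, as in struct hfs_btree */
        uint32_t root, leaf_count, leaf_head, leaf_tail;
        uint32_t node_count, free_nodes, attributes;
        uint16_t node_size, max_key_len, depth;
        int node_size_shift;
        unsigned int pages_per_bnode;
};

static int parse_btree_header(const struct disk_header *head, struct tree_info *t)
{
        t->root        = be32toh(head->root);
        t->leaf_count  = be32toh(head->leaf_count);
        t->leaf_head   = be32toh(head->leaf_head);
        t->leaf_tail   = be32toh(head->leaf_tail);
        t->node_count  = be32toh(head->node_count);
        t->free_nodes  = be32toh(head->free_nodes);
        t->attributes  = be32toh(head->attributes);
        t->node_size   = be16toh(head->node_size);
        t->max_key_len = be16toh(head->max_key_len);
        t->depth       = be16toh(head->depth);

        if (!t->node_count)              /* an empty tree is rejected (line 99) */
                return -1;

        /* max_key_len is additionally checked against HFS_MAX_EXT_KEYLEN or
         * HFS_MAX_CAT_KEYLEN depending on which tree is opened (lines 103, 110) */

        /* node_size is a power of two, so ffs() yields its log2 */
        t->node_size_shift = ffs(t->node_size) - 1;
        t->pages_per_bnode = (t->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        return 0;
}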
139 void hfs_btree_close(struct hfs_btree *tree) in hfs_btree_close() argument
144 if (!tree) in hfs_btree_close()
148 while ((node = tree->node_hash[i])) { in hfs_btree_close()
149 tree->node_hash[i] = node->next_hash; in hfs_btree_close()
152 node->tree->cnid, node->this, in hfs_btree_close()
155 tree->node_hash_cnt--; in hfs_btree_close()
158 iput(tree->inode); in hfs_btree_close()
159 kfree(tree); in hfs_btree_close()
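hfs_btree_close() drains every bucket of the in-memory bnode hash before dropping the tree's inode and freeing the tree itself; the fragment at line 152 suggests a diagnostic print (cnid and node index) for nodes still hashed at that point. A stand-alone model of the per-bucket pop loop at lines 148-155, with an illustrative node type rather than the kernel's struct hfs_bnode:

#include <stdlib.h>

struct bnode {                           /* illustrative stand-in for struct hfs_bnode */
        struct bnode *next_hash;
};

static void drain_bucket(struct bnode **bucket, unsigned int *hash_cnt)
{
        struct bnode *node;

        while ((node = *bucket)) {       /* same shape as tree->node_hash[i] */
                *bucket = node->next_hash;   /* unlink the chain head */
                /* line 152 suggests a diagnostic print of cnid/this here */
                free(node);
                (*hash_cnt)--;               /* mirrors tree->node_hash_cnt-- */
        }
}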
162 void hfs_btree_write(struct hfs_btree *tree) in hfs_btree_write() argument
168 node = hfs_bnode_find(tree, 0); in hfs_btree_write()
177 head->root = cpu_to_be32(tree->root); in hfs_btree_write()
178 head->leaf_count = cpu_to_be32(tree->leaf_count); in hfs_btree_write()
179 head->leaf_head = cpu_to_be32(tree->leaf_head); in hfs_btree_write()
180 head->leaf_tail = cpu_to_be32(tree->leaf_tail); in hfs_btree_write()
181 head->node_count = cpu_to_be32(tree->node_count); in hfs_btree_write()
182 head->free_nodes = cpu_to_be32(tree->free_nodes); in hfs_btree_write()
183 head->attributes = cpu_to_be32(tree->attributes); in hfs_btree_write()
184 head->depth = cpu_to_be16(tree->depth); in hfs_btree_write()
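hfs_btree_write() is the inverse of the header parsing in hfs_btree_open(): it looks up node 0 with hfs_bnode_find(tree, 0) and copies the in-memory fields back into the on-disk header in big-endian form. A sketch of that packing step, reusing the illustrative structs and <endian.h> conversions from the parse_btree_header() sketch above:

static void pack_btree_header(const struct tree_info *t, struct disk_header *head)
{
        head->root       = htobe32(t->root);
        head->leaf_count = htobe32(t->leaf_count);
        head->leaf_head  = htobe32(t->leaf_head);
        head->leaf_tail  = htobe32(t->leaf_tail);
        head->node_count = htobe32(t->node_count);
        head->free_nodes = htobe32(t->free_nodes);
        head->attributes = htobe32(t->attributes);
        head->depth      = htobe16(t->depth);
        /* node_size and max_key_len are not rewritten in the fragments above */
}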
193 struct hfs_btree *tree = prev->tree; in hfs_bmap_new_bmap() local
198 node = hfs_bnode_create(tree, idx); in hfs_bmap_new_bmap()
202 if (!tree->free_nodes) in hfs_bmap_new_bmap()
204 tree->free_nodes--; in hfs_bmap_new_bmap()
211 hfs_bnode_clear(node, 0, tree->node_size); in hfs_bmap_new_bmap()
220 hfs_bnode_write_u16(node, tree->node_size - 2, 14); in hfs_bmap_new_bmap()
221 hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6); in hfs_bmap_new_bmap()
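hfs_bmap_new_bmap() turns a spare node into a fresh map (bitmap) node: it consumes one free node, zeroes it, and writes the record-offset table at the tail of the node. HFS B-tree nodes keep their 16-bit record offsets big-endian and packed backwards from the end of the node, so record 0's offset sits at node_size - 2 and the end-of-record offset at node_size - 4; the value 14 written at line 220 matches the size of the bnode descriptor, i.e. the map record starts immediately after it. A user-space model of those two writes, with a hypothetical write_be16() standing in for hfs_bnode_write_u16():

#include <stdint.h>

/* hypothetical helper mirroring hfs_bnode_write_u16(): HFS stores
 * on-disk 16-bit values big-endian */
static void write_be16(uint8_t *node, unsigned int off, uint16_t val)
{
        node[off]     = val >> 8;
        node[off + 1] = val & 0xff;
}

static void init_map_node_offsets(uint8_t *node, unsigned int node_size)
{
        write_be16(node, node_size - 2, 14);             /* record 0 starts after the 14-byte descriptor */
        write_be16(node, node_size - 4, node_size - 6);  /* end of record 0 / start of free space */
}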
227 int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) in hfs_bmap_reserve() argument
229 struct inode *inode = tree->inode; in hfs_bmap_reserve()
233 while (tree->free_nodes < rsvd_nodes) { in hfs_bmap_reserve()
239 HFS_SB(tree->sb)->alloc_blksz; in hfs_bmap_reserve()
241 tree->sb->s_blocksize_bits; in hfs_bmap_reserve()
243 count = inode->i_size >> tree->node_size_shift; in hfs_bmap_reserve()
244 tree->free_nodes += count - tree->node_count; in hfs_bmap_reserve()
245 tree->node_count = count; in hfs_bmap_reserve()
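hfs_bmap_reserve() loops while tree->free_nodes is below the requested reservation, extending the B-tree file by whole allocation blocks each time; after an extension it recomputes the node count from i_size and credits the difference to free_nodes (lines 243-245). That accounting step in isolation, with hypothetical parameter names:

#include <stdint.h>

/* Recompute node_count from the (possibly grown) file size and credit
 * the newly appended nodes to free_nodes; mirrors lines 243-245. */
static void account_new_nodes(uint64_t i_size, unsigned int node_size_shift,
                              uint32_t *node_count, uint32_t *free_nodes)
{
        uint32_t count = (uint32_t)(i_size >> node_size_shift);

        *free_nodes += count - *node_count;
        *node_count  = count;
}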
250 struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) in hfs_bmap_alloc() argument
261 res = hfs_bmap_reserve(tree, 1); in hfs_bmap_alloc()
266 node = hfs_bnode_find(tree, nidx); in hfs_bmap_alloc()
288 tree->free_nodes--; in hfs_bmap_alloc()
289 mark_inode_dirty(tree->inode); in hfs_bmap_alloc()
291 return hfs_bnode_create(tree, idx); in hfs_bmap_alloc()
309 next_node = hfs_bnode_find(tree, nidx); in hfs_bmap_alloc()
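Between the two hfs_bnode_find() calls above, hfs_bmap_alloc() scans the map record of each bitmap node for the first clear bit, sets it, and converts the bit position into a node index, following the chain to the next map node (line 309) when the current one is full. A runnable user-space model of the single-map scan, with the map as a plain buffer instead of bnode pages:

#include <stdint.h>

/* Find the first clear bit in an MSB-first allocation map, set it and
 * return its index, or -1 if the map is full (the caller then moves on
 * to the next bmap node, as at line 309). */
static int alloc_from_map(uint8_t *map, unsigned int map_bits)
{
        for (unsigned int i = 0; i < map_bits; i++) {
                uint8_t mask = 0x80 >> (i & 7);      /* HFS bitmaps are MSB-first */

                if (!(map[i >> 3] & mask)) {
                        map[i >> 3] |= mask;         /* mark the node in use */
                        return (int)i;               /* index within this map node */
                }
        }
        return -1;
}

On success the kernel then decrements tree->free_nodes and marks tree->inode dirty (lines 288-289) before handing the index to hfs_bnode_create().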
326 struct hfs_btree *tree; in hfs_bmap_free() local
333 tree = node->tree; in hfs_bmap_free()
335 node = hfs_bnode_find(tree, 0); in hfs_bmap_free()
352 node = hfs_bnode_find(tree, i); in hfs_bmap_free()
381 tree->free_nodes++; in hfs_bmap_free()
382 mark_inode_dirty(tree->inode); in hfs_bmap_free()
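hfs_bmap_free() is the reverse path: starting from the header node it walks to the map node covering the freed index, clears that node's bit, then bumps tree->free_nodes and dirties the inode (lines 381-382). The bit-clearing step, as a counterpart to alloc_from_map() above:

#include <stdint.h>

/* Clear the in-use bit for node index nidx within its map node. */
static void free_in_map(uint8_t *map, unsigned int nidx)
{
        map[nidx >> 3] &= (uint8_t)~(0x80 >> (nidx & 7));
}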