/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */
10
11 #include <linux/string.h>
12 #include <linux/slab.h>
13 #include <linux/pagemap.h>
14 #include <linux/fs.h>
15 #include <linux/swap.h>
16 #include <linux/version.h>
17 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
18 #include <linux/buffer_head.h>
19 #endif
20
21 #include "hfsplus_fs.h"
22 #include "hfsplus_raw.h"
23
24 #define REF_PAGES 0
25
26 int hfsplus_update_idx_rec(struct hfsplus_find_data *fd);
27
28 /* Get the given block associated with the tree owning node */
hfsplus_getblk(struct inode * inode,unsigned long n)29 struct buffer_head *hfsplus_getblk(struct inode *inode, unsigned long n)
30 {
31 struct super_block *sb;
32 struct buffer_head tmp_bh;
33
34 sb = inode->i_sb;
35 if (hfsplus_get_block(inode, n, &tmp_bh, 1)) {
36 printk("HFS+-fs: Failed to find block for B*Tree data\n");
37 return NULL;
38 }
39 return sb_bread(sb, tmp_bh.b_blocknr);
40 }
41
42 /* Copy a specified range of bytes from the raw data of a node */
/* Copy a specified range of bytes from the raw data of a node */
void hfsplus_bnode_readbytes(hfsplus_bnode *node, void *buf,
			     unsigned long off, unsigned long len)
{
	unsigned long l;
	struct page **pagep;

	/* translate the node-relative offset into a page pointer plus
	 * an offset within that page */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	/* first (possibly partial) page */
	l = min(len, PAGE_CACHE_SIZE - off);
	memcpy(buf, hfsplus_kmap(*pagep) + off, l);
	hfsplus_kunmap(*pagep++);

	/* remaining pages are copied from offset 0 */
	while ((len -= l)) {
		buf += l;
		l = min(len, PAGE_CACHE_SIZE);
		memcpy(buf, hfsplus_kmap(*pagep), l);
		hfsplus_kunmap(*pagep++);
	}
}
64
hfsplus_bnode_read_u16(hfsplus_bnode * node,unsigned long off)65 u16 hfsplus_bnode_read_u16(hfsplus_bnode *node, unsigned long off)
66 {
67 u16 data;
68 // optimize later...
69 hfsplus_bnode_readbytes(node, &data, off, 2);
70 return be16_to_cpu(data);
71 }
72
/* Copy the key of the record at @off into @key, including its 2-byte
 * length word. */
void hfsplus_bnode_read_key(hfsplus_bnode *node, void *key, unsigned long off)
{
	unsigned long key_len;
	hfsplus_btree *tree = node->tree;

	/* Index nodes in trees with fixed-size index keys always store
	 * max_key_len bytes of key; everything else carries an explicit
	 * length in the first two bytes of the record. */
	if (node->kind != HFSPLUS_NODE_LEAF &&
	    !(tree->attributes & HFSPLUS_TREE_VAR_NDXKEY_SIZE))
		key_len = tree->max_key_len + 2;
	else
		key_len = hfsplus_bnode_read_u16(node, off) + 2;

	hfsplus_bnode_readbytes(node, key, off, key_len);
}
87
/* Copy a specified range of bytes into the raw data of a node,
 * dirtying each touched page */
void hfsplus_bnode_writebytes(hfsplus_bnode *node, void *buf,
			      unsigned long off, unsigned long len)
{
	unsigned long l;
	struct page **pagep;

	/* translate the node-relative offset into a page pointer plus
	 * an offset within that page */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	/* first (possibly partial) page */
	l = min(len, PAGE_CACHE_SIZE - off);
	memcpy(hfsplus_kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	hfsplus_kunmap(*pagep++);

	/* remaining pages are written from offset 0 */
	while ((len -= l)) {
		buf += l;
		l = min(len, PAGE_CACHE_SIZE);
		memcpy(hfsplus_kmap(*pagep), buf, l);
		set_page_dirty(*pagep);
		hfsplus_kunmap(*pagep++);
	}
}
111
/* Store a 16-bit value into a node, converting to big-endian on disk */
void hfsplus_bnode_write_u16(hfsplus_bnode *node, unsigned long off, u16 data)
{
	u16 raw = cpu_to_be16(data);

	// optimize later...
	hfsplus_bnode_writebytes(node, &raw, off, 2);
}
118
/* Copy bytes from one node to another (non-overlapping), walking the
 * backing pages of source and destination independently. */
void hfsplus_bnode_copybytes(hfsplus_bnode *dst_node, unsigned long dst,
			     hfsplus_bnode *src_node, unsigned long src, unsigned long len)
{
	struct hfsplus_btree *tree;
	struct page **src_page, **dst_page;
	unsigned long l;

	dprint(DBG_BNODE_MOD, "copybytes: %lu,%lu,%lu\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	/* convert node offsets to page pointers + in-page offsets */
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
	src &= ~PAGE_CACHE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
	dst &= ~PAGE_CACHE_MASK;

	if (src == dst) {
		/* identical in-page alignment: both sides cross page
		 * boundaries in lockstep */
		l = min(len, PAGE_CACHE_SIZE - src);
		memcpy(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, l);
		hfsplus_kunmap(*src_page++);
		set_page_dirty(*dst_page);
		hfsplus_kunmap(*dst_page++);

		while ((len -= l)) {
			l = min(len, PAGE_CACHE_SIZE);
			memcpy(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), l);
			hfsplus_kunmap(*src_page++);
			set_page_dirty(*dst_page);
			hfsplus_kunmap(*dst_page++);
		}
	} else {
		/* misaligned: copy up to the nearer page boundary each
		 * round, then advance whichever side hit its boundary */
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = hfsplus_kmap(*src_page) + src;
			dst_ptr = hfsplus_kmap(*dst_page) + dst;
			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
				l = PAGE_CACHE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_CACHE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			hfsplus_kunmap(*src_page);
			set_page_dirty(*dst_page);
			hfsplus_kunmap(*dst_page);
			/* exactly one side wrapped to a new page */
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}
178
/* Move bytes within a single node, handling overlap like memmove:
 * a forward move (dst > src) is performed back-to-front so the
 * overlapping tail is not clobbered before it is read. */
void hfsplus_bnode_movebytes(hfsplus_bnode *node, unsigned long dst,
			     unsigned long src, unsigned long len)
{
	struct page **src_page, **dst_page;
	unsigned long l;

	dprint(DBG_BNODE_MOD, "movebytes: %lu,%lu,%lu\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		/* backward copy: point at the last byte of each range;
		 * src/dst become the count of bytes used in that page
		 * (1..PAGE_CACHE_SIZE) */
		src += len - 1;
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src = (src & ~PAGE_CACHE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst = (dst & ~PAGE_CACHE_MASK) + 1;

		if (src == dst) {
			/* identical alignment: move whole page tails in
			 * lockstep, then the final partial chunk */
			while (src < len) {
				memmove(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), src);
				hfsplus_kunmap(*src_page--);
				set_page_dirty(*dst_page);
				hfsplus_kunmap(*dst_page--);
				len -= src;
				src = PAGE_CACHE_SIZE;
			}
			src -= len;
			memmove(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, len);
			hfsplus_kunmap(*src_page);
			set_page_dirty(*dst_page);
			hfsplus_kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = hfsplus_kmap(*src_page) + src;
				dst_ptr = hfsplus_kmap(*dst_page) + dst;
				/* copy down to the nearer page start */
				if (src < dst) {
					l = src;
					src = PAGE_CACHE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_CACHE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				hfsplus_kunmap(*src_page);
				set_page_dirty(*dst_page);
				hfsplus_kunmap(*dst_page);
				/* step back whichever side reached the
				 * start of its page */
				if (dst == PAGE_CACHE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		/* forward move: copy front to back */
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src &= ~PAGE_CACHE_MASK;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst &= ~PAGE_CACHE_MASK;

		if (src == dst) {
			/* identical alignment: lockstep page walk */
			l = min(len, PAGE_CACHE_SIZE - src);
			memmove(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, l);
			hfsplus_kunmap(*src_page++);
			set_page_dirty(*dst_page);
			hfsplus_kunmap(*dst_page++);

			while ((len -= l)) {
				l = min(len, PAGE_CACHE_SIZE);
				memmove(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), l);
				hfsplus_kunmap(*src_page++);
				set_page_dirty(*dst_page);
				hfsplus_kunmap(*dst_page++);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = hfsplus_kmap(*src_page) + src;
				dst_ptr = hfsplus_kmap(*dst_page) + dst;
				/* copy up to the nearer page boundary */
				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
					l = PAGE_CACHE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_CACHE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				hfsplus_kunmap(*src_page);
				set_page_dirty(*dst_page);
				hfsplus_kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}
286
/* Dump a node's descriptor and record offset table for debugging */
void hfsplus_bnode_dump(hfsplus_bnode *node)
{
	hfsplus_btree_node_desc desc;
	u32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfsplus_bnode_readbytes(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.kind, desc.height, be16_to_cpu(desc.num_rec));

	/* the offset table grows down from the end of the node; the
	 * loop runs num_rec+1 times (i down to 0) so the free-space
	 * offset after the last record is printed too */
	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_rec); i >= 0; off -= 2, i--) {
		key_off = hfsplus_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->kind == HFSPLUS_NODE_NDX) {
			int tmp;

			/* for index records also show key length and
			 * the child node cnid stored after the key */
			tmp = hfsplus_bnode_read_u16(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d", tmp);
			hfsplus_bnode_readbytes(node, &cnid, key_off + 2 + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}
314
/* Grow the tree by one level: allocate a new root node and, if an old
 * root exists, point a single index record at it.  Returns 0 or a
 * negative error from node allocation. */
int hfsplus_btree_add_level(hfsplus_btree *tree)
{
	hfsplus_bnode *node, *new_node;
	hfsplus_btree_node_desc node_desc;
	int key_size, rec;
	u32 cnid;

	node = NULL;
	if (tree->root)
		node = hfsplus_find_bnode(tree, tree->root);
	new_node = hfsplus_btree_alloc_node(tree);
	if (IS_ERR(new_node))
		return PTR_ERR(new_node);

	tree->root = new_node->this;
	if (!tree->depth) {
		/* empty tree: the new root is also the only leaf */
		tree->leaf_head = tree->leaf_tail = new_node->this;
		new_node->kind = HFSPLUS_NODE_LEAF;
		new_node->num_recs = 0;
	} else {
		new_node->kind = HFSPLUS_NODE_NDX;
		new_node->num_recs = 1;
	}
	new_node->parent = 0;
	new_node->next = 0;
	new_node->prev = 0;
	new_node->height = ++tree->depth;

	/* write the on-disk node descriptor */
	node_desc.next = cpu_to_be32(new_node->next);
	node_desc.prev = cpu_to_be32(new_node->prev);
	node_desc.kind = new_node->kind;
	node_desc.height = new_node->height;
	node_desc.num_rec = cpu_to_be16(new_node->num_recs);
	node_desc.reserved = 0;
	hfsplus_bnode_writebytes(new_node, &node_desc, 0, sizeof(node_desc));

	/* first offset-table entry: record data starts right after the
	 * 14-byte node descriptor */
	rec = tree->node_size - 2;
	hfsplus_bnode_write_u16(new_node, rec, 14);

	if (node) {
		/* insert old root idx into new root */
		node->parent = tree->root;
		key_size = hfsplus_bnode_read_u16(node, 14) + 2;
		// key_size if index node
		hfsplus_bnode_copybytes(new_node, 14, node, 14, key_size);
		cnid = cpu_to_be32(node->this);
		hfsplus_bnode_writebytes(new_node, &cnid, 14 + key_size, 4);

		rec -= 2;
		hfsplus_bnode_write_u16(new_node, rec, 14 + key_size + 4);

		hfsplus_put_bnode(node);
	}
	hfsplus_put_bnode(new_node);
	mark_inode_dirty(tree->inode);

	return 0;
}
373
/* Split a full node, moving the records in its upper half into a
 * freshly allocated sibling.  fd is retargeted if the record it
 * tracks moved to the new node.  Returns the new node (referenced),
 * an ERR_PTR on allocation failure, or NULL on a corrupt node. */
hfsplus_bnode *hfsplus_bnode_split(struct hfsplus_find_data *fd)
{
	hfsplus_btree *tree;
	hfsplus_bnode *node, *new_node;
	hfsplus_btree_node_desc node_desc;
	int num_recs, new_rec_off, new_off, old_rec_off;
	int data_start, data_end, size;

	tree = fd->tree;
	node = fd->bnode;
	new_node = hfsplus_btree_alloc_node(tree);
	if (IS_ERR(new_node))
		return new_node;
	hfsplus_get_bnode(node);
	dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n",
	       node->this, new_node->this, node->next);
	/* chain the new node right after the old one */
	new_node->next = node->next;
	new_node->prev = node->this;
	new_node->parent = node->parent;
	new_node->kind = node->kind;
	new_node->height = node->height;

	/* find the first record starting past the halfway point; the
	 * records below it stay, the rest move to the new node */
	size = tree->node_size / 2;
	old_rec_off = tree->node_size - 4;
	num_recs = 1;
	for (;;) {
		data_start = hfsplus_bnode_read_u16(node, old_rec_off);
		if (data_start > size)
			break;
		old_rec_off -= 2;
		num_recs++;
		if (num_recs < node->num_recs)
			continue;
		/* panic? */
		hfsplus_put_bnode(node);
		hfsplus_put_bnode(new_node);
		return NULL;
	}

	if (fd->record + 1 < num_recs) {
		/* new record is in the lower half,
		 * so leave some more space there
		 */
		old_rec_off += 2;
		num_recs--;
		data_start = hfsplus_bnode_read_u16(node, old_rec_off);
	} else {
		/* the tracked record moves to the new node; retarget
		 * fd's node reference, record index and offsets */
		hfsplus_put_bnode(node);
		hfsplus_get_bnode(new_node);
		fd->bnode = new_node;
		fd->record -= num_recs;
		fd->keyoffset -= data_start;
		fd->entryoffset -= data_start;
	}
	new_node->num_recs = node->num_recs - num_recs;
	node->num_recs = num_recs;

	/* rebuild the new node's offset table: every moved offset
	 * shifts down by (data_start - 14) */
	new_rec_off = tree->node_size - 2;
	new_off = 14;
	size = data_start - new_off;
	num_recs = new_node->num_recs;
	data_end = data_start;
	while (num_recs) {
		hfsplus_bnode_write_u16(new_node, new_rec_off, new_off);
		old_rec_off -= 2;
		new_rec_off -= 2;
		data_end = hfsplus_bnode_read_u16(node, old_rec_off);
		new_off = data_end - size;
		num_recs--;
	}
	hfsplus_bnode_write_u16(new_node, new_rec_off, new_off);
	hfsplus_bnode_copybytes(new_node, 14, node, data_start, data_end - data_start);

	/* update new bnode header */
	node_desc.next = cpu_to_be32(new_node->next);
	node_desc.prev = cpu_to_be32(new_node->prev);
	node_desc.kind = new_node->kind;
	node_desc.height = new_node->height;
	node_desc.num_rec = cpu_to_be16(new_node->num_recs);
	node_desc.reserved = 0;
	hfsplus_bnode_writebytes(new_node, &node_desc, 0, sizeof(node_desc));

	/* update previous bnode header */
	node->next = new_node->this;
	hfsplus_bnode_readbytes(node, &node_desc, 0, sizeof(node_desc));
	node_desc.next = cpu_to_be32(node->next);
	node_desc.num_rec = cpu_to_be16(node->num_recs);
	hfsplus_bnode_writebytes(node, &node_desc, 0, sizeof(node_desc));

	/* update next bnode header */
	if (new_node->next) {
		hfsplus_bnode *next_node = hfsplus_find_bnode(tree, new_node->next);
		next_node->prev = new_node->this;
		hfsplus_bnode_readbytes(next_node, &node_desc, 0, sizeof(node_desc));
		node_desc.prev = cpu_to_be32(next_node->prev);
		hfsplus_bnode_writebytes(next_node, &node_desc, 0, sizeof(node_desc));
		hfsplus_put_bnode(next_node);
	} else if (node->this == tree->leaf_tail) {
		/* if there is no next node, this might be the new tail */
		tree->leaf_tail = new_node->this;
		mark_inode_dirty(tree->inode);
	}

	hfsplus_bnode_dump(node);
	hfsplus_bnode_dump(new_node);
	hfsplus_put_bnode(node);

	return new_node;
}
483
/* Insert a new record (fd->search_key + @entry) at fd's position,
 * splitting nodes and adding tree levels as needed.  After a split,
 * loops to insert the matching index record into the parent. */
int hfsplus_bnode_insert_rec(struct hfsplus_find_data *fd, void *entry, int entry_len)
{
	hfsplus_btree *tree;
	hfsplus_bnode *node, *new_node;
	int size, key_len, rec;
	int data_off, end_off;
	int idx_rec_off, data_rec_off, end_rec_off;
	u32 cnid;

	tree = fd->tree;
	if (!fd->bnode) {
		/* no insertion point found: create the first leaf if
		 * the tree is empty and insert at its front */
		if (!tree->root)
			hfsplus_btree_add_level(tree);
		fd->bnode = hfsplus_find_bnode(tree, tree->leaf_head);
		fd->record = -1;
	}
	new_node = NULL;
again:
	/* new record idx and complete record size */
	rec = fd->record + 1;
	key_len = be16_to_cpu(fd->search_key->key_len) + 2;
	size = key_len + entry_len;

	node = fd->bnode;
	hfsplus_bnode_dump(node);
	/* get last offset */
	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
	end_off = hfsplus_bnode_read_u16(node, end_rec_off);
	end_rec_off -= 2;
	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
	if (size > end_rec_off - end_off) {
		/* not enough free space: split the node and retry;
		 * a second failure after splitting is fatal */
		if (new_node)
			panic("not enough room!\n");
		new_node = hfsplus_bnode_split(fd);
		if (IS_ERR(new_node))
			return PTR_ERR(new_node);
		goto again;
	}
	if (node->kind == HFSPLUS_NODE_LEAF) {
		tree->leaf_count++;
		mark_inode_dirty(tree->inode);
	}
	node->num_recs++;
	/* write new last offset */
	hfsplus_bnode_write_u16(node, offsetof(hfsplus_btree_node_desc, num_rec), node->num_recs);
	hfsplus_bnode_write_u16(node, end_rec_off, end_off + size);
	data_off = end_off;
	data_rec_off = end_rec_off + 2;
	idx_rec_off = tree->node_size - (rec + 1) * 2;
	if (idx_rec_off == data_rec_off)
		goto skip;
	/* move all following entries */
	do {
		data_off = hfsplus_bnode_read_u16(node, data_rec_off + 2);
		hfsplus_bnode_write_u16(node, data_rec_off, data_off + size);
		data_rec_off += 2;
	} while (data_rec_off < idx_rec_off);

	/* move data away */
	hfsplus_bnode_movebytes(node, data_off + size, data_off,
				end_off - data_off);

skip:
	/* write the key followed by the entry data */
	hfsplus_bnode_writebytes(node, fd->search_key, data_off, key_len);
	hfsplus_bnode_writebytes(node, entry, data_off + key_len, entry_len);
	hfsplus_bnode_dump(node);

	if (new_node) {
		/* the old node's first key may have changed */
		if (!rec && new_node != node)
			hfsplus_update_idx_rec(fd);

		hfsplus_put_bnode(fd->bnode);
		if (!new_node->parent) {
			hfsplus_btree_add_level(tree);
			new_node->parent = tree->root;
		}
		fd->bnode = hfsplus_find_bnode(tree, new_node->parent);

		/* create index data entry */
		cnid = cpu_to_be32(new_node->this);
		entry = &cnid;
		entry_len = sizeof(cnid);

		/* get index key */
		hfsplus_bnode_read_key(new_node, fd->search_key, 14);
		hfsplus_find_rec(fd->bnode, fd);

		hfsplus_put_bnode(new_node);
		new_node = NULL;
		/* insert the index record into the parent */
		goto again;
	}

	if (!rec)
		hfsplus_update_idx_rec(fd);

	return 0;
}
581
/* Propagate a changed first key of fd->bnode into the parent's index
 * record, resizing (and splitting) the parent as needed; repeats up
 * the tree while the updated record is the first of its node. */
int hfsplus_update_idx_rec(struct hfsplus_find_data *fd)
{
	hfsplus_btree *tree;
	hfsplus_bnode *node, *new_node, *parent;
	int newkeylen, diff;
	int rec, rec_off, end_rec_off;
	int start_off, end_off;

	tree = fd->tree;
	node = fd->bnode;
	new_node = NULL;
	if (!node->parent)
		return 0;

again:
	/* locate the index record pointing at this node */
	parent = hfsplus_find_bnode(tree, node->parent);
	hfsplus_find_rec(parent, fd);
	hfsplus_bnode_dump(parent);
	rec = fd->record;

	/* size difference between old and new key */
	newkeylen = hfsplus_bnode_read_u16(node, 14) + 2;
	dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen);

	rec_off = tree->node_size - (rec + 2) * 2;
	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
	diff = newkeylen - fd->keylength;
	if (!diff)
		goto skip;
	if (diff > 0) {
		/* key grows: make room in the parent, splitting it if
		 * there is not enough free space */
		end_off = hfsplus_bnode_read_u16(parent, end_rec_off);
		if (end_rec_off - end_off < diff) {

			printk("splitting index node...\n");
			fd->bnode = parent;
			new_node = hfsplus_bnode_split(fd);
			if (IS_ERR(new_node))
				return PTR_ERR(new_node);
			parent = fd->bnode;
			rec = fd->record;
			rec_off = tree->node_size - (rec + 2) * 2;
			end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
		}
	}

	/* shift the offsets of all following records by diff */
	end_off = start_off = hfsplus_bnode_read_u16(parent, rec_off);
	hfsplus_bnode_write_u16(parent, rec_off, start_off + diff);
	start_off -= 4;	/* move previous cnid too */

	while (rec_off > end_rec_off) {
		rec_off -= 2;
		end_off = hfsplus_bnode_read_u16(parent, rec_off);
		hfsplus_bnode_write_u16(parent, rec_off, end_off + diff);
	}
	hfsplus_bnode_movebytes(parent, start_off + diff, start_off,
				end_off - start_off);
skip:
	/* copy the child's new first key into the index record */
	hfsplus_bnode_copybytes(parent, fd->keyoffset, node, 14, newkeylen);
	hfsplus_bnode_dump(parent);

	hfsplus_put_bnode(node);
	node = parent;

	if (new_node) {
		u32 cnid;

		/* insert an index record for the node split off above */
		fd->bnode = hfsplus_find_bnode(tree, new_node->parent);
		/* create index key and entry */
		hfsplus_bnode_read_key(new_node, fd->search_key, 14);
		cnid = cpu_to_be32(new_node->this);

		hfsplus_find_rec(fd->bnode, fd);
		hfsplus_bnode_insert_rec(fd, &cnid, sizeof(cnid));
		hfsplus_put_bnode(fd->bnode);
		hfsplus_put_bnode(new_node);

		if (!rec) {
			if (new_node == node)
				goto out;
			/* restore search_key */
			hfsplus_bnode_read_key(node, fd->search_key, 14);
		}
	}

	/* first record changed: the grandparent's key needs updating */
	if (!rec && node->parent)
		goto again;
out:
	fd->bnode = node;
	return 0;
}
672
/* Remove the record fd points at.  A node left empty is removed from
 * the tree and its index record removed from the parent in turn; a
 * removed first record triggers an index key update. */
int hfsplus_bnode_remove_rec(struct hfsplus_find_data *fd)
{
	hfsplus_btree *tree;
	hfsplus_bnode *node, *parent;
	int end_off, rec_off, data_off, size;

	tree = fd->tree;
	node = fd->bnode;
again:
	rec_off = tree->node_size - (fd->record + 2) * 2;
	end_off = tree->node_size - (node->num_recs + 1) * 2;

	if (node->kind == HFSPLUS_NODE_LEAF) {
		tree->leaf_count--;
		mark_inode_dirty(tree->inode);
	}
	hfsplus_bnode_dump(node);
	dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength);
	if (!--node->num_recs) {
		/* node became empty: drop it and recurse to remove its
		 * index record from the parent */
		hfsplus_btree_remove_node(node);
		if (!node->parent)
			return 0;
		parent = hfsplus_find_bnode(tree, node->parent);
		if (!parent)
			return -EIO;
		hfsplus_put_bnode(node);
		node = fd->bnode = parent;

		hfsplus_find_rec(node, fd);
		goto again;
	}
	hfsplus_bnode_write_u16(node, offsetof(hfsplus_btree_node_desc, num_rec), node->num_recs);

	if (rec_off == end_off)
		goto skip;
	size = fd->keylength + fd->entrylength;

	/* shift the offsets of all following records down by size */
	do {
		data_off = hfsplus_bnode_read_u16(node, rec_off);
		hfsplus_bnode_write_u16(node, rec_off + 2, data_off - size);
		rec_off -= 2;
	} while (rec_off >= end_off);

	/* fill hole */
	hfsplus_bnode_movebytes(node, fd->keyoffset, fd->keyoffset + size,
				data_off - fd->keyoffset - size);
skip:
	hfsplus_bnode_dump(node);
	if (!fd->record)
		hfsplus_update_idx_rec(fd);
	return 0;
}
725
726 /* Check for valid kind/height pairs , return 0 for bad pairings */
/* Check for valid kind/height pairs , return 0 for bad pairings */
static int hfsplus_check_kh(hfsplus_btree *tree, u8 kind, u8 height)
{
	switch (kind) {
	case HFSPLUS_NODE_HEAD:
	case HFSPLUS_NODE_MAP:
		/* header and map nodes sit outside the tree proper */
		if (height != 0)
			goto hk_error;
		break;
	case HFSPLUS_NODE_LEAF:
		if (height != 1)
			goto hk_error;
		break;
	case HFSPLUS_NODE_NDX:
		if (height <= 1 || height > tree->depth)
			goto hk_error;
		break;
	default:
		printk("HFS+-fs: unknown node type in B*Tree\n");
		return 0;
	}
	return 1;
hk_error:
	printk("HFS+-fs: corrupt node height in B*Tree\n");
	return 0;
}
747
/* Fold a node number into an index for the node hash table */
static inline int hfsplus_bnode_hash(u32 num)
{
	u32 h = num + (num >> 16);

	h += h >> 8;
	return h & (NODE_HASH_SIZE - 1);
}
754
__hfsplus_find_bnode(hfsplus_btree * tree,u32 cnid)755 hfsplus_bnode *__hfsplus_find_bnode(hfsplus_btree *tree, u32 cnid)
756 {
757 hfsplus_bnode *node;
758
759 if (cnid >= tree->node_count) {
760 printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
761 return NULL;
762 }
763
764 for (node = tree->node_hash[hfsplus_bnode_hash(cnid)];
765 node; node = node->next_hash) {
766 if (node->this == cnid) {
767 return node;
768 }
769 }
770 return NULL;
771 }
772
/* Allocate an in-core node descriptor for @cnid, hash it, and grab
 * (reading if necessary) the pagecache pages backing it.  If another
 * thread hashed the same node first, its copy is returned instead.
 * On page errors HFSPLUS_BNODE_ERROR is set and the node returned. */
hfsplus_bnode *__hfsplus_create_bnode(hfsplus_btree *tree, u32 cnid)
{
	struct super_block *sb;
	hfsplus_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	/* descriptor plus page pointer array in a single allocation */
	size = sizeof(hfsplus_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	/* NEW keeps other lookups waiting until the node is validated */
	set_bit(HFSPLUS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = __hfsplus_find_bnode(tree, cnid);
	if (!node2) {
		hash = hfsplus_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		/* lost the race: drop our copy, wait for the winner's
		 * node to finish initializing, and use that one */
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFSPLUS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = grab_cache_page(mapping, block++);
		if (!page)
			goto fail;
		if (!PageUptodate(page)) {
			/* NOTE(review): assumes ->readpage unlocks the
			 * page even on error — verify for the backing
			 * address space */
			if (mapping->a_ops->readpage(NULL, page))
				goto fail;
			wait_on_page_locked(page);
			if (!PageUptodate(page))
				goto fail;
		} else
			unlock_page(page);
#if !REF_PAGES
		/* long-term page references are not held in this
		 * configuration (see REF_PAGES) */
		page_cache_release(page);
#endif
		node->page[i] = page;
	}

	return node;
fail:
	if (page)
		page_cache_release(page);
	set_bit(HFSPLUS_BNODE_ERROR, &node->flags);
	return node;
}
845
/* Unlink a node from its tree's hash chain; the node must be hashed
 * and the caller must hold hash_lock. */
void __hfsplus_bnode_remove(hfsplus_bnode *node)
{
	hfsplus_bnode **link;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
	       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	link = &node->tree->node_hash[hfsplus_bnode_hash(node->this)];
	while (*link != node) {
		if (!*link)
			BUG();
		link = &(*link)->next_hash;
	}
	*link = node->next_hash;
	node->tree->node_hash_cnt--;
}
860
861 /* Load a particular node out of a tree */
/* Load a particular node out of a tree */
hfsplus_bnode *hfsplus_find_bnode(hfsplus_btree *tree, u32 num)
{
	hfsplus_bnode *node;
	hfsplus_btree_node_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	/* fast path: node already cached in the hash table */
	spin_lock(&tree->hash_lock);
	node = __hfsplus_find_bnode(tree, num);
	if (node) {
		hfsplus_get_bnode(node);
		spin_unlock(&tree->hash_lock);
		/* wait for a concurrent initializer to finish */
		wait_event(node->lock_wq, !test_bit(HFSPLUS_BNODE_NEW, &node->flags));
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfsplus_create_bnode(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (!test_bit(HFSPLUS_BNODE_NEW, &node->flags))
		/* someone else created and validated it meanwhile */
		return node;

	/* copy the on-disk node descriptor into the in-core node */
	desc = (hfsplus_btree_node_desc *)(hfsplus_kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_rec);
	node->kind = desc->kind;
	node->height = desc->height;

	if (!hfsplus_check_kh(tree, desc->kind, desc->height)) {
		hfsplus_kunmap(node->page[0]);
		goto node_error;
	}
	hfsplus_kunmap(node->page[0]);

	/* sanity-check the record offset table: offsets must start at
	 * the descriptor size and be strictly increasing, even, and
	 * within the node */
	rec_off = tree->node_size - 2;
	off = hfsplus_bnode_read_u16(node, rec_off);
	if (off != sizeof(hfsplus_btree_node_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfsplus_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->kind != HFSPLUS_NODE_NDX &&
		    node->kind != HFSPLUS_NODE_LEAF)
			continue;
		/* the key must leave room for some entry data */
		key_size = hfsplus_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	/* validated: let waiters proceed */
	clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFSPLUS_BNODE_ERROR, &node->flags);
	clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfsplus_put_bnode(node);
	return ERR_PTR(-EIO);
}
927
/* Free the in-core descriptor of an unhashed node.  Page references
 * are not held in the !REF_PAGES configuration, so only the
 * descriptor itself needs releasing. */
void hfsplus_bnode_free(hfsplus_bnode *node)
{
	kfree(node);
}
937
/* Create a brand-new node: like hfsplus_find_bnode, but the caller
 * guarantees the node is not yet cached, and its contents are zeroed
 * rather than validated from disk. */
hfsplus_bnode *hfsplus_create_bnode(hfsplus_btree *tree, u32 num)
{
	hfsplus_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = __hfsplus_find_bnode(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node)
		BUG();
	node = __hfsplus_create_bnode(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* zero the node: first page from page_offset, the rest fully */
	pagep = node->page;
	memset(hfsplus_kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	hfsplus_kunmap(*pagep++);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(hfsplus_kmap(*pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		hfsplus_kunmap(*pagep++);
	}
	/* node is ready: release any waiters */
	clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
968
/* Take a reference on a node; a NULL node is silently ignored */
void hfsplus_get_bnode(hfsplus_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
#if REF_PAGES
		{
			/* also pin the backing pages */
			int i;
			for (i = 0; i < node->tree->pages_per_bnode; i++)
				get_page(node->page[i]);
		}
#endif
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}
984
985 /* Dispose of resources used by a node */
/* Drop a reference on a node (NULL is ignored).  On the final put,
 * deleted nodes are unhashed and freed. */
void hfsplus_put_bnode(hfsplus_bnode *node)
{
	if (node) {
		struct hfsplus_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		if (!atomic_read(&node->refcnt))
			BUG();
		/* atomic_dec_and_lock acquires hash_lock only when the
		 * count drops to zero */
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
#if REF_PAGES
			for (i = 0; i < tree->pages_per_bnode; i++)
				put_page(node->page[i]);
#endif
			return;
		}
		/* last reference: keep the backing pages warm in the
		 * page cache */
		for (i = 0; i < tree->pages_per_bnode; i++) {
			mark_page_accessed(node->page[i]);
#if REF_PAGES
			put_page(node->page[i]);
#endif
		}

		if (test_bit(HFSPLUS_BNODE_DELETED, &node->flags)) {
			/* node was deleted while referenced: unhash it
			 * and release its storage now */
			__hfsplus_bnode_remove(node);
			spin_unlock(&tree->hash_lock);
			hfsplus_btree_free_node(node);
			hfsplus_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}
1020
/* Acquire the node's lock bit, sleeping until it can be taken */
void hfsplus_lock_bnode(hfsplus_bnode *node)
{
	wait_event(node->lock_wq, !test_and_set_bit(HFSPLUS_BNODE_LOCK, &node->flags));
}
1025
/* Release the node's lock bit and wake up any waiters */
void hfsplus_unlock_bnode(hfsplus_bnode *node)
{
	clear_bit(HFSPLUS_BNODE_LOCK, &node->flags);
	if (waitqueue_active(&node->lock_wq))
		wake_up(&node->lock_wq);
}
1032