1 /*
2 * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details
3 */
4
5 #include <linux/reiserfs_fs.h>
6
7 #include <linux/slab.h>
8 #include <linux/interrupt.h>
9 #include <linux/sched.h>
10 #include <linux/bug.h>
11 #include <linux/workqueue.h>
12 #include <asm/unaligned.h>
13 #include <linux/bitops.h>
14 #include <linux/proc_fs.h>
15 #include <linux/buffer_head.h>
16
17 /* the 32 bit compat definitions with int argument */
18 #define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int)
19 #define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
20 #define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
21 #define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
22 #define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
23
24 struct reiserfs_journal_list;
25
26 /** bitmasks for i_flags field in reiserfs-specific part of inode */
27 typedef enum {
28 /** this says what format of key all items (except stat data) of
29 an object have. If this is set, the format is 3.6; otherwise it
30 is 3.5 */
31 i_item_key_version_mask = 0x0001,
32 /** If this is unset, the object has 3.5 stat data; otherwise it has
33 3.6 stat data with 64-bit size, 32-bit nlink etc. */
34 i_stat_data_version_mask = 0x0002,
35 /** file might need tail packing on close */
36 i_pack_on_close_mask = 0x0004,
37 /** don't pack tail of file */
38 i_nopack_mask = 0x0008,
39 /** If either of these is set, a "safe link" was created for this file
40 during truncate or unlink. Safe links are used to avoid leaking disk
41 space when the machine crashes with files open but already unlinked. */
42 i_link_saved_unlink_mask = 0x0010,
43 i_link_saved_truncate_mask = 0x0020,
44 i_has_xattr_dir = 0x0040,
45 i_data_log = 0x0080,
46 } reiserfs_inode_flags;
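/*
 * Illustrative sketch (not part of this header): the masks above are meant
 * to be tested and set on REISERFS_I(inode)->i_flags (REISERFS_I() is
 * declared further down in this header). The helper name below is
 * hypothetical and only demonstrates the intended usage:
 *
 *	static inline int example_tail_packing_disabled(struct inode *inode)
 *	{
 *		return REISERFS_I(inode)->i_flags & i_nopack_mask;
 *	}
 *
 *	REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
 */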
47
48 struct reiserfs_inode_info {
49 __u32 i_key[4]; /* key is still 4 32 bit integers */
50 /** transient inode flags that are never stored on disk. Bitmasks
51 for this field are defined above. */
52 __u32 i_flags;
53
54 __u32 i_first_direct_byte; // offset of first byte stored in direct item.
55
56 /* copy of persistent inode flags read from sd_attrs. */
57 __u32 i_attrs;
58
59 int i_prealloc_block; /* first unused block of a sequence of unused blocks */
60 int i_prealloc_count; /* length of that sequence */
61 struct list_head i_prealloc_list; /* per-transaction list of inodes which
62 * have preallocated blocks */
63
64 unsigned new_packing_locality:1; /* new_packing_locality is created; new blocks
65 * for the contents of this directory should be
66 * displaced */
67
68 /* we use these for fsync or O_SYNC to decide which transaction
69 ** needs to be committed in order for this inode to be properly
70 ** flushed */
71 unsigned int i_trans_id;
72 struct reiserfs_journal_list *i_jl;
73 atomic_t openers;
74 struct mutex tailpack;
75 #ifdef CONFIG_REISERFS_FS_XATTR
76 struct rw_semaphore i_xattr_sem;
77 #endif
78 struct inode vfs_inode;
79 };
80
81 typedef enum {
82 reiserfs_attrs_cleared = 0x00000001,
83 } reiserfs_super_block_flags;
84
85 /* struct reiserfs_super_block accessors/mutators
86 * since this is a disk structure, it will always be in
87 * little endian format. */
88 #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
89 #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
90 #define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_v1.s_free_blocks))
91 #define set_sb_free_blocks(sbp,v) ((sbp)->s_v1.s_free_blocks = cpu_to_le32(v))
92 #define sb_root_block(sbp) (le32_to_cpu((sbp)->s_v1.s_root_block))
93 #define set_sb_root_block(sbp,v) ((sbp)->s_v1.s_root_block = cpu_to_le32(v))
94
95 #define sb_jp_journal_1st_block(sbp) \
96 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_1st_block))
97 #define set_sb_jp_journal_1st_block(sbp,v) \
98 ((sbp)->s_v1.s_journal.jp_journal_1st_block = cpu_to_le32(v))
99 #define sb_jp_journal_dev(sbp) \
100 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_dev))
101 #define set_sb_jp_journal_dev(sbp,v) \
102 ((sbp)->s_v1.s_journal.jp_journal_dev = cpu_to_le32(v))
103 #define sb_jp_journal_size(sbp) \
104 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_size))
105 #define set_sb_jp_journal_size(sbp,v) \
106 ((sbp)->s_v1.s_journal.jp_journal_size = cpu_to_le32(v))
107 #define sb_jp_journal_trans_max(sbp) \
108 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_trans_max))
109 #define set_sb_jp_journal_trans_max(sbp,v) \
110 ((sbp)->s_v1.s_journal.jp_journal_trans_max = cpu_to_le32(v))
111 #define sb_jp_journal_magic(sbp) \
112 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_magic))
113 #define set_sb_jp_journal_magic(sbp,v) \
114 ((sbp)->s_v1.s_journal.jp_journal_magic = cpu_to_le32(v))
115 #define sb_jp_journal_max_batch(sbp) \
116 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_batch))
117 #define set_sb_jp_journal_max_batch(sbp,v) \
118 ((sbp)->s_v1.s_journal.jp_journal_max_batch = cpu_to_le32(v))
119 #define sb_jp_jourmal_max_commit_age(sbp) \
120 (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_commit_age))
121 #define set_sb_jp_journal_max_commit_age(sbp,v) \
122 ((sbp)->s_v1.s_journal.jp_journal_max_commit_age = cpu_to_le32(v))
123
124 #define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_v1.s_blocksize))
125 #define set_sb_blocksize(sbp,v) ((sbp)->s_v1.s_blocksize = cpu_to_le16(v))
126 #define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_maxsize))
127 #define set_sb_oid_maxsize(sbp,v) ((sbp)->s_v1.s_oid_maxsize = cpu_to_le16(v))
128 #define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_cursize))
129 #define set_sb_oid_cursize(sbp,v) ((sbp)->s_v1.s_oid_cursize = cpu_to_le16(v))
130 #define sb_umount_state(sbp) (le16_to_cpu((sbp)->s_v1.s_umount_state))
131 #define set_sb_umount_state(sbp,v) ((sbp)->s_v1.s_umount_state = cpu_to_le16(v))
132 #define sb_fs_state(sbp) (le16_to_cpu((sbp)->s_v1.s_fs_state))
133 #define set_sb_fs_state(sbp,v) ((sbp)->s_v1.s_fs_state = cpu_to_le16(v))
134 #define sb_hash_function_code(sbp) \
135 (le32_to_cpu((sbp)->s_v1.s_hash_function_code))
136 #define set_sb_hash_function_code(sbp,v) \
137 ((sbp)->s_v1.s_hash_function_code = cpu_to_le32(v))
138 #define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_v1.s_tree_height))
139 #define set_sb_tree_height(sbp,v) ((sbp)->s_v1.s_tree_height = cpu_to_le16(v))
140 #define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_v1.s_bmap_nr))
141 #define set_sb_bmap_nr(sbp,v) ((sbp)->s_v1.s_bmap_nr = cpu_to_le16(v))
142 #define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version))
143 #define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v))
144
145 #define sb_mnt_count(sbp) (le16_to_cpu((sbp)->s_mnt_count))
146 #define set_sb_mnt_count(sbp, v) ((sbp)->s_mnt_count = cpu_to_le16(v))
147
148 #define sb_reserved_for_journal(sbp) \
149 (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
150 #define set_sb_reserved_for_journal(sbp,v) \
151 ((sbp)->s_v1.s_reserved_for_journal = cpu_to_le16(v))
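/*
 * Illustrative sketch (not part of this header): the accessors above hide
 * the little-endian conversion, so on-disk super block fields are always
 * read and written in cpu byte order, e.g.:
 *
 *	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
 *	__u32 free = sb_free_blocks(rs);	 // le32 -> cpu
 *	set_sb_free_blocks(rs, free - 1);	 // cpu -> le32
 *
 * SB_DISK_SUPER_BLOCK() is defined further down in this header.
 */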
152
153 /* LOGGING -- */
154
155 /* These all interrelate for performance.
156 **
157 ** If the journal block count is smaller than n transactions, you lose speed.
158 ** I don't know what n is yet, I'm guessing 8-16.
159 **
160 ** typical transaction size depends on the application, how often fsync is
161 ** called, and how many metadata blocks you dirty in a 30 second period.
162 ** The more small files (<16k) you use, the larger your transactions will
163 ** be.
164 **
165 ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
166 ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
167 ** to prevent wrapping before dirty meta blocks get to disk.
168 **
169 ** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal
170 ** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping.
171 **
172 ** The larger the batch max age, the better the speed, and the more metadata changes you'll lose after a crash.
173 **
174 */
175
176 /* don't mess with these for a while */
177 /* we have a node size define somewhere in reiserfs_fs.h. -Hans */
178 #define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */
179 #define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */
180 #define JOURNAL_HASH_SIZE 8192
181 #define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */
182
183 /* One of these for every block in every transaction
184 ** Each one is in two hash tables. First, a hash of the current transaction, and after journal_end, a
185 ** hash of all the in memory transactions.
186 ** next and prev are used by the current transaction (journal_hash).
187 ** hnext and hprev are used by journal_list_hash. If a block is in more than one transaction, the journal_list_hash
188 ** links it in multiple times. This allows flush_journal_list to remove just the cnode belonging
189 ** to a given transaction.
190 */
191 struct reiserfs_journal_cnode {
192 struct buffer_head *bh; /* real buffer head */
193 struct super_block *sb; /* dev of real buffer head */
194 __u32 blocknr; /* block number of real buffer head, == 0 when buffer on disk */
195 unsigned long state;
196 struct reiserfs_journal_list *jlist; /* journal list this cnode lives in */
197 struct reiserfs_journal_cnode *next; /* next in transaction list */
198 struct reiserfs_journal_cnode *prev; /* prev in transaction list */
199 struct reiserfs_journal_cnode *hprev; /* prev in hash list */
200 struct reiserfs_journal_cnode *hnext; /* next in hash list */
201 };
202
203 struct reiserfs_bitmap_node {
204 int id;
205 char *data;
206 struct list_head list;
207 };
208
209 struct reiserfs_list_bitmap {
210 struct reiserfs_journal_list *journal_list;
211 struct reiserfs_bitmap_node **bitmaps;
212 };
213
214 /*
215 ** one of these for each transaction. The most important part here is the j_realblock.
216 ** this list of cnodes is used to hash all the blocks in all the commits, to mark all the
217 ** real buffer heads dirty once all the commits hit the disk,
218 ** and to make sure every real block in a transaction is on disk before allowing the log area
219 ** to be overwritten */
220 struct reiserfs_journal_list {
221 unsigned long j_start;
222 unsigned long j_state;
223 unsigned long j_len;
224 atomic_t j_nonzerolen;
225 atomic_t j_commit_left;
226 atomic_t j_older_commits_done; /* all commits older than this on disk */
227 struct mutex j_commit_mutex;
228 unsigned int j_trans_id;
229 time_t j_timestamp;
230 struct reiserfs_list_bitmap *j_list_bitmap;
231 struct buffer_head *j_commit_bh; /* commit buffer head */
232 struct reiserfs_journal_cnode *j_realblock;
233 struct reiserfs_journal_cnode *j_freedlist; /* list of buffers that were freed during this trans. free each of these on flush */
234 /* time ordered list of all active transactions */
235 struct list_head j_list;
236
237 /* time ordered list of all transactions we haven't tried to flush yet */
238 struct list_head j_working_list;
239
240 /* list of tail conversion targets in need of flush before commit */
241 struct list_head j_tail_bh_list;
242 /* list of data=ordered buffers in need of flush before commit */
243 struct list_head j_bh_list;
244 int j_refcount;
245 };
246
247 struct reiserfs_journal {
248 struct buffer_head **j_ap_blocks; /* journal blocks on disk */
249 struct reiserfs_journal_cnode *j_last; /* newest journal block */
250 struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */
251
252 struct block_device *j_dev_bd;
253 fmode_t j_dev_mode;
254 int j_1st_reserved_block; /* first block on s_dev of the reserved journal area */
255
256 unsigned long j_state;
257 unsigned int j_trans_id;
258 unsigned long j_mount_id;
259 unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */
260 unsigned long j_len; /* length of current waiting commit */
261 unsigned long j_len_alloc; /* number of buffers requested by journal_begin() */
262 atomic_t j_wcount; /* count of writers for current commit */
263 unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */
264 unsigned long j_first_unflushed_offset; /* offset of the first unflushed transaction */
265 unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */
266 struct buffer_head *j_header_bh;
267
268 time_t j_trans_start_time; /* time this transaction started */
269 struct mutex j_mutex;
270 struct mutex j_flush_mutex;
271 wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */
272 atomic_t j_jlock; /* lock for j_join_wait */
273 int j_list_bitmap_index; /* number of next list bitmap to use */
274 int j_must_wait; /* no more journal begins allowed. MUST sleep on j_join_wait */
275 int j_next_full_flush; /* next journal_end will flush all journal lists */
276 int j_next_async_flush; /* next journal_end will flush all async commits */
277
278 int j_cnode_used; /* number of cnodes on the used list */
279 int j_cnode_free; /* number of cnodes on the free list */
280
281 unsigned int j_trans_max; /* max number of blocks in a transaction. */
282 unsigned int j_max_batch; /* max number of blocks to batch into a trans */
283 unsigned int j_max_commit_age; /* in seconds, how old can an async commit be */
284 unsigned int j_max_trans_age; /* in seconds, how old can a transaction be */
285 unsigned int j_default_max_commit_age; /* the default for the max commit age */
286
287 struct reiserfs_journal_cnode *j_cnode_free_list;
288 struct reiserfs_journal_cnode *j_cnode_free_orig; /* orig pointer returned from vmalloc */
289
290 struct reiserfs_journal_list *j_current_jl;
291 int j_free_bitmap_nodes;
292 int j_used_bitmap_nodes;
293
294 int j_num_lists; /* total number of active transactions */
295 int j_num_work_lists; /* number that need attention from kreiserfsd */
296
297 /* debugging to make sure things are flushed in order */
298 unsigned int j_last_flush_id;
299
300 /* debugging to make sure things are committed in order */
301 unsigned int j_last_commit_id;
302
303 struct list_head j_bitmap_nodes;
304 struct list_head j_dirty_buffers;
305 spinlock_t j_dirty_buffers_lock; /* protects j_dirty_buffers */
306
307 /* list of all active transactions */
308 struct list_head j_journal_list;
309 /* lists that haven't been touched by writeback attempts */
310 struct list_head j_working_list;
311
312 struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
313 struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
314 struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
315 the transactions */
316 struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
317 int j_persistent_trans;
318 unsigned long j_max_trans_size;
319 unsigned long j_max_batch_size;
320
321 int j_errno;
322
323 /* when flushing ordered buffers, throttle new ordered writers */
324 struct delayed_work j_work;
325 struct super_block *j_work_sb;
326 atomic_t j_async_throttle;
327 };
328
329 enum journal_state_bits {
330 J_WRITERS_BLOCKED = 1, /* set when new writers not allowed */
331 J_WRITERS_QUEUED, /* set when log is full due to too many writers */
332 J_ABORTED, /* set when log is aborted */
333 };
334
335 #define JOURNAL_DESC_MAGIC "ReIsErLB" /* ick. magic string to find desc blocks in the journal */
336
337 typedef __u32(*hashf_t) (const signed char *, int);
338
339 struct reiserfs_bitmap_info {
340 __u32 free_count;
341 };
342
343 struct proc_dir_entry;
344
345 #if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
346 typedef unsigned long int stat_cnt_t;
347 typedef struct reiserfs_proc_info_data {
348 spinlock_t lock;
349 int exiting;
350 int max_hash_collisions;
351
352 stat_cnt_t breads;
353 stat_cnt_t bread_miss;
354 stat_cnt_t search_by_key;
355 stat_cnt_t search_by_key_fs_changed;
356 stat_cnt_t search_by_key_restarted;
357
358 stat_cnt_t insert_item_restarted;
359 stat_cnt_t paste_into_item_restarted;
360 stat_cnt_t cut_from_item_restarted;
361 stat_cnt_t delete_solid_item_restarted;
362 stat_cnt_t delete_item_restarted;
363
364 stat_cnt_t leaked_oid;
365 stat_cnt_t leaves_removable;
366
367 /* balances per level. Use explicit 5 as MAX_HEIGHT is not visible yet. */
368 stat_cnt_t balance_at[5]; /* XXX */
369 /* sbk == search_by_key */
370 stat_cnt_t sbk_read_at[5]; /* XXX */
371 stat_cnt_t sbk_fs_changed[5];
372 stat_cnt_t sbk_restarted[5];
373 stat_cnt_t items_at[5]; /* XXX */
374 stat_cnt_t free_at[5]; /* XXX */
375 stat_cnt_t can_node_be_removed[5]; /* XXX */
376 long int lnum[5]; /* XXX */
377 long int rnum[5]; /* XXX */
378 long int lbytes[5]; /* XXX */
379 long int rbytes[5]; /* XXX */
380 stat_cnt_t get_neighbors[5];
381 stat_cnt_t get_neighbors_restart[5];
382 stat_cnt_t need_l_neighbor[5];
383 stat_cnt_t need_r_neighbor[5];
384
385 stat_cnt_t free_block;
386 struct __scan_bitmap_stats {
387 stat_cnt_t call;
388 stat_cnt_t wait;
389 stat_cnt_t bmap;
390 stat_cnt_t retry;
391 stat_cnt_t in_journal_hint;
392 stat_cnt_t in_journal_nohint;
393 stat_cnt_t stolen;
394 } scan_bitmap;
395 struct __journal_stats {
396 stat_cnt_t in_journal;
397 stat_cnt_t in_journal_bitmap;
398 stat_cnt_t in_journal_reusable;
399 stat_cnt_t lock_journal;
400 stat_cnt_t lock_journal_wait;
401 stat_cnt_t journal_being;
402 stat_cnt_t journal_relock_writers;
403 stat_cnt_t journal_relock_wcount;
404 stat_cnt_t mark_dirty;
405 stat_cnt_t mark_dirty_already;
406 stat_cnt_t mark_dirty_notjournal;
407 stat_cnt_t restore_prepared;
408 stat_cnt_t prepare;
409 stat_cnt_t prepare_retry;
410 } journal;
411 } reiserfs_proc_info_data_t;
412 #else
413 typedef struct reiserfs_proc_info_data {
414 } reiserfs_proc_info_data_t;
415 #endif
416
417 /* reiserfs union of in-core super block data */
418 struct reiserfs_sb_info {
419 struct buffer_head *s_sbh; /* Buffer containing the super block */
420 /* both the comment and the choice of
421 name are unclear for s_rs -Hans */
422 struct reiserfs_super_block *s_rs; /* Pointer to the super block in the buffer */
423 struct reiserfs_bitmap_info *s_ap_bitmap;
424 struct reiserfs_journal *s_journal; /* pointer to journal information */
425 unsigned short s_mount_state; /* reiserfs state (valid, invalid) */
426
427 /* Serialize writers access, replace the old bkl */
428 struct mutex lock;
429 /* Owner of the lock (can be recursive) */
430 struct task_struct *lock_owner;
431 /* Depth of the lock, start from -1 like the bkl */
432 int lock_depth;
433
434 /* Comment? -Hans */
435 void (*end_io_handler) (struct buffer_head *, int);
436 hashf_t s_hash_function; /* pointer to function which is used
437 to sort names in directory. Set on
438 mount */
439 unsigned long s_mount_opt; /* reiserfs's mount options are set
440 here (currently - NOTAIL, NOLOG,
441 REPLAYONLY) */
442
443 struct { /* This is a structure that describes block allocator options */
444 unsigned long bits; /* Bitfield for enable/disable kind of options */
445 unsigned long large_file_size; /* size (in blocks) above which we consider a file to be large */
446 int border; /* percentage of the disk that the border takes */
447 int preallocmin; /* Minimal file size (in blocks) starting from which we do preallocations */
448 int preallocsize; /* Number of blocks we try to prealloc when file
449 reaches preallocmin size (in blocks) or
450 prealloc_list is empty. */
451 } s_alloc_options;
452
453 /* Comment? -Hans */
454 wait_queue_head_t s_wait;
455 /* To be obsoleted soon by per buffer seals.. -Hans */
456 atomic_t s_generation_counter; // increased by one every time the
457 // tree gets re-balanced
458 unsigned long s_properties; /* File system properties. Currently holds
459 on-disk FS format */
460
461 /* session statistics */
462 int s_disk_reads;
463 int s_disk_writes;
464 int s_fix_nodes;
465 int s_do_balance;
466 int s_unneeded_left_neighbor;
467 int s_good_search_by_key_reada;
468 int s_bmaps;
469 int s_bmaps_without_search;
470 int s_direct2indirect;
471 int s_indirect2direct;
472 /* set up when it's ok for reiserfs_read_inode2() to read from
473 disk inode with nlink==0. Currently this is only used during
474 finish_unfinished() processing at mount time */
475 int s_is_unlinked_ok;
476 reiserfs_proc_info_data_t s_proc_info_data;
477 struct proc_dir_entry *procdir;
478 int reserved_blocks; /* amount of blocks reserved for further allocations */
479 spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
480 struct dentry *priv_root; /* root of /.reiserfs_priv */
481 struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */
482 int j_errno;
483 #ifdef CONFIG_QUOTA
484 char *s_qf_names[MAXQUOTAS];
485 int s_jquota_fmt;
486 #endif
487 char *s_jdev; /* Stored jdev for mount option showing */
488 #ifdef CONFIG_REISERFS_CHECK
489
490 struct tree_balance *cur_tb; /*
491 * Detects whether more than one
492 * copy of tb exists per superblock
493 * as a means of checking whether
494 * do_balance is executing concurrently
495 * against another tree reader/writer
496 * on a same mount point.
497 */
498 #endif
499 };
500
501 /* Definitions of reiserfs on-disk properties: */
502 #define REISERFS_3_5 0
503 #define REISERFS_3_6 1
504 #define REISERFS_OLD_FORMAT 2
505
506 enum reiserfs_mount_options {
507 /* Mount options */
508 REISERFS_LARGETAIL, /* large tails will be created in a session */
509 REISERFS_SMALLTAIL, /* small (for files less than block size) tails will be created in a session */
510 REPLAYONLY, /* replay journal and return 0. Used by fsck */
511 REISERFS_CONVERT, /* -o conv: causes conversion of old
512 format super block to the new
513 format. If not specified, the old
514 partition will be dealt with in a
515 3.5.x manner. */
516
517 /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
518 ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
519 ** is not required. If the normal autodetection code can't determine which
520 ** hash to use (because both hashes had the same value for a file)
521 ** use this option to force a specific hash. It won't allow you to override
522 ** the existing hash on the FS, so if you have a tea hash disk, and mount
523 ** with -o hash=rupasov, the mount will fail.
524 */
525 FORCE_TEA_HASH, /* try to force tea hash on mount */
526 FORCE_RUPASOV_HASH, /* try to force rupasov hash on mount */
527 FORCE_R5_HASH, /* try to force r5 hash on mount */
528 FORCE_HASH_DETECT, /* try to detect hash function on mount */
529
530 REISERFS_DATA_LOG,
531 REISERFS_DATA_ORDERED,
532 REISERFS_DATA_WRITEBACK,
533
534 /* used for testing experimental features; makes benchmarking new
535 features with and without them more convenient; should never be
536 enabled in any code shipped to users (ideally) */
537
538 REISERFS_NO_BORDER,
539 REISERFS_NO_UNHASHED_RELOCATION,
540 REISERFS_HASHED_RELOCATION,
541 REISERFS_ATTRS,
542 REISERFS_XATTRS_USER,
543 REISERFS_POSIXACL,
544 REISERFS_EXPOSE_PRIVROOT,
545 REISERFS_BARRIER_NONE,
546 REISERFS_BARRIER_FLUSH,
547
548 /* Actions on error */
549 REISERFS_ERROR_PANIC,
550 REISERFS_ERROR_RO,
551 REISERFS_ERROR_CONTINUE,
552
553 REISERFS_USRQUOTA, /* User quota option specified */
554 REISERFS_GRPQUOTA, /* Group quota option specified */
555
556 REISERFS_TEST1,
557 REISERFS_TEST2,
558 REISERFS_TEST3,
559 REISERFS_TEST4,
560 REISERFS_UNSUPPORTED_OPT,
561 };
562
563 #define reiserfs_r5_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_R5_HASH))
564 #define reiserfs_rupasov_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_RUPASOV_HASH))
565 #define reiserfs_tea_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_TEA_HASH))
566 #define reiserfs_hash_detect(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_HASH_DETECT))
567 #define reiserfs_no_border(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_BORDER))
568 #define reiserfs_no_unhashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION))
569 #define reiserfs_hashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_HASHED_RELOCATION))
570 #define reiserfs_test4(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TEST4))
571
572 #define have_large_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_LARGETAIL))
573 #define have_small_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_SMALLTAIL))
574 #define replay_only(s) (REISERFS_SB(s)->s_mount_opt & (1 << REPLAYONLY))
575 #define reiserfs_attrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ATTRS))
576 #define old_format_only(s) (REISERFS_SB(s)->s_properties & (1 << REISERFS_3_5))
577 #define convert_reiserfs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_CONVERT))
578 #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
579 #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
580 #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
581 #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
582 #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
583 #define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT))
584 #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
585 #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE))
586 #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH))
587
588 #define reiserfs_error_panic(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_PANIC))
589 #define reiserfs_error_ro(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_RO))
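/*
 * Illustrative sketch (not part of this header): mount options are plain
 * bits in REISERFS_SB(s)->s_mount_opt, so option parsing typically does
 * something like
 *
 *	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
 *
 * and the predicates above reduce to a bit test, e.g.
 *
 *	if (reiserfs_data_ordered(s))
 *		// use ordered-data journalling
 */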
590
591 void reiserfs_file_buffer(struct buffer_head *bh, int list);
592 extern struct file_system_type reiserfs_fs_type;
593 int reiserfs_resize(struct super_block *, unsigned long);
594
595 #define CARRY_ON 0
596 #define SCHEDULE_OCCURRED 1
597
598 #define SB_BUFFER_WITH_SB(s) (REISERFS_SB(s)->s_sbh)
599 #define SB_JOURNAL(s) (REISERFS_SB(s)->s_journal)
600 #define SB_JOURNAL_1st_RESERVED_BLOCK(s) (SB_JOURNAL(s)->j_1st_reserved_block)
601 #define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free)
602 #define SB_AP_BITMAP(s) (REISERFS_SB(s)->s_ap_bitmap)
603
604 #define SB_DISK_JOURNAL_HEAD(s) (SB_JOURNAL(s)->j_header_bh)
605
606 /* A safe version of the "bdevname", which returns the "s_id" field of
607 * a superblock or else "Null superblock" if the super block is NULL.
608 */
609 static inline char *reiserfs_bdevname(struct super_block *s)
610 {
611 return (s == NULL) ? "Null superblock" : s->s_id;
612 }
613
614 #define reiserfs_is_journal_aborted(journal) (unlikely (__reiserfs_is_journal_aborted (journal)))
615 static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
616 *journal)
617 {
618 return test_bit(J_ABORTED, &journal->j_state);
619 }
620
621 /*
622 * Locking primitives. The write lock is a per superblock
623 * special mutex that has properties close to the Big Kernel Lock
624 * which was used in the previous locking scheme.
625 */
626 void reiserfs_write_lock(struct super_block *s);
627 void reiserfs_write_unlock(struct super_block *s);
628 int reiserfs_write_lock_once(struct super_block *s);
629 void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
630
631 #ifdef CONFIG_REISERFS_CHECK
632 void reiserfs_lock_check_recursive(struct super_block *s);
633 #else
634 static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
635 #endif
636
637 /*
638 * Several mutexes depend on the write lock.
639 * However sometimes we want to relax the write lock while we hold
640 * these mutexes, according to the release/reacquire on schedule()
641 * properties of the Bkl that were used.
642 * Reiserfs performance and locking were based on this scheme.
643 * Now that the write lock is a mutex and not the bkl anymore, doing so
644 * may result in a deadlock:
645 *
646 * A acquire write_lock
647 * A acquire j_commit_mutex
648 * A release write_lock and wait for something
649 * B acquire write_lock
650 * B can't acquire j_commit_mutex and sleep
651 * A can't acquire write lock anymore
652 * deadlock
653 *
654 * What we do here is avoid such deadlocks by playing the same game
655 * as the Bkl: if we can't acquire a mutex that depends on the write lock,
656 * we release the write lock, wait a bit and then retry.
657 *
658 * The mutexes concerned by this hack are:
659 * - The commit mutex of a journal list
660 * - The flush mutex
661 * - The journal lock
662 * - The inode mutex
663 */
664 static inline void reiserfs_mutex_lock_safe(struct mutex *m,
665 struct super_block *s)
666 {
667 reiserfs_lock_check_recursive(s);
668 reiserfs_write_unlock(s);
669 mutex_lock(m);
670 reiserfs_write_lock(s);
671 }
672
673 static inline void
674 reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
675 struct super_block *s)
676 {
677 reiserfs_lock_check_recursive(s);
678 reiserfs_write_unlock(s);
679 mutex_lock_nested(m, subclass);
680 reiserfs_write_lock(s);
681 }
682
683 static inline void
684 reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
685 {
686 reiserfs_lock_check_recursive(s);
687 reiserfs_write_unlock(s);
688 down_read(sem);
689 reiserfs_write_lock(s);
690 }
691
692 /*
693 * When we schedule, we usually want to also release the write lock,
694 * according to the previous bkl based locking scheme of reiserfs.
695 */
696 static inline void reiserfs_cond_resched(struct super_block *s)
697 {
698 if (need_resched()) {
699 reiserfs_write_unlock(s);
700 schedule();
701 reiserfs_write_lock(s);
702 }
703 }
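/*
 * Illustrative sketch (not part of this header): a typical caller of the
 * helpers above already holds the write lock and then takes one of the
 * mutexes listed in the comment block further up, e.g.:
 *
 *	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 *
 *	reiserfs_write_lock(sb);
 *	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, sb);
 *	// ... flush work, calling reiserfs_cond_resched(sb) in long loops ...
 *	mutex_unlock(&journal->j_flush_mutex);
 *	reiserfs_write_unlock(sb);
 */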
704
705 struct fid;
706
707 /* in reading the #defines, it may help to understand that they employ
708 the following abbreviations:
709
710 B = Buffer
711 I = Item header
712 H = Height within the tree (should be changed to LEV)
713 N = Number of the item in the node
714 STAT = stat data
715 DEH = Directory Entry Header
716 EC = Entry Count
717 E = Entry number
718 UL = Unsigned Long
719 BLKH = BLocK Header
720 UNFM = UNForMatted node
721 DC = Disk Child
722 P = Path
723
724 These #defines are named by concatenating these abbreviations,
725 where first comes the arguments, and last comes the return value,
726 of the macro.
727
728 */
729
730 #define USE_INODE_GENERATION_COUNTER
731
732 #define REISERFS_PREALLOCATE
733 #define DISPLACE_NEW_PACKING_LOCALITIES
734 #define PREALLOCATION_SIZE 9
735
736 /* n must be power of 2 */
737 #define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
738
739 // to be ok for alpha and others we have to align structures to 8 byte
740 // boundary.
741 // FIXME: do not change 4 by anything else: there is code which relies on that
742 #define ROUND_UP(x) _ROUND_UP(x,8LL)
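/*
 * Worked example (illustrative): with n = 8, _ROUND_UP() rounds up to the
 * next multiple of 8, so ROUND_UP(1) == 8, ROUND_UP(8) == 8 and
 * ROUND_UP(13) == 16. n must be a power of two because the mask trick
 * ~((n)-1u) only clears the low bits for power-of-two n.
 */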
743
744 /* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
745 ** messages.
746 */
747 #define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */
748
749 void __reiserfs_warning(struct super_block *s, const char *id,
750 const char *func, const char *fmt, ...);
751 #define reiserfs_warning(s, id, fmt, args...) \
752 __reiserfs_warning(s, id, __func__, fmt, ##args)
753 /* assertions handling */
754
755 /** always check a condition and panic if it's false. */
756 #define __RASSERT(cond, scond, format, args...) \
757 do { \
758 if (!(cond)) \
759 reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
760 __FILE__ ":%i:%s: " format "\n", \
761 in_interrupt() ? -1 : task_pid_nr(current), \
762 __LINE__, __func__ , ##args); \
763 } while (0)
764
765 #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
766
767 #if defined( CONFIG_REISERFS_CHECK )
768 #define RFALSE(cond, format, args...) __RASSERT(!(cond), "!(" #cond ")", format, ##args)
769 #else
770 #define RFALSE( cond, format, args... ) do {;} while( 0 )
771 #endif
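/*
 * Illustrative sketch (not part of this header): RASSERT() always checks,
 * RFALSE() panics only when CONFIG_REISERFS_CHECK is enabled, and the
 * condition passed to RFALSE() is the *error* case (it triggers when the
 * condition is true), e.g.:
 *
 *	RASSERT(bh != NULL, "block %lu has no buffer head", blocknr);
 *	RFALSE(ih_item_len(ih) > MAX_ITEM_LEN(sb->s_blocksize),
 *	       "item longer than a block");
 */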
772
773 #define CONSTF __attribute_const__
774 /*
775 * Disk Data Structures
776 */
777
778 /***************************************************************************/
779 /* SUPER BLOCK */
780 /***************************************************************************/
781
782 /*
783 * Structure of the super block on disk, a version of which in RAM is often accessed as REISERFS_SB(s)->s_rs.
784 * The version in RAM is part of a larger structure containing fields never written to disk.
785 */
786 #define UNSET_HASH 0 // read_super will guess which hash was used to
787 // sort names in directories
788 #define TEA_HASH 1
789 #define YURA_HASH 2
790 #define R5_HASH 3
791 #define DEFAULT_HASH R5_HASH
792
793 struct journal_params {
794 __le32 jp_journal_1st_block; /* where does journal start from on its
795 * device */
796 __le32 jp_journal_dev; /* journal device st_rdev */
797 __le32 jp_journal_size; /* size of the journal */
798 __le32 jp_journal_trans_max; /* max number of blocks in a transaction. */
799 __le32 jp_journal_magic; /* random value made on fs creation (this
800 * was sb_journal_block_count) */
801 __le32 jp_journal_max_batch; /* max number of blocks to batch into a
802 * trans */
803 __le32 jp_journal_max_commit_age; /* in seconds, how old can an async
804 * commit be */
805 __le32 jp_journal_max_trans_age; /* in seconds, how old can a transaction
806 * be */
807 };
808
809 /* this is the super from 3.5.X, where X >= 10 */
810 struct reiserfs_super_block_v1 {
811 __le32 s_block_count; /* blocks count */
812 __le32 s_free_blocks; /* free blocks count */
813 __le32 s_root_block; /* root block number */
814 struct journal_params s_journal;
815 __le16 s_blocksize; /* block size */
816 __le16 s_oid_maxsize; /* max size of object id array, see
817 * get_objectid() commentary */
818 __le16 s_oid_cursize; /* current size of object id array */
819 __le16 s_umount_state; /* this is set to 1 when the filesystem was
820 * cleanly unmounted, and to 2 when it was not */
821 char s_magic[10]; /* reiserfs magic string indicates that
822 * file system is reiserfs:
823 * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
824 __le16 s_fs_state; /* it is used by fsck to mark which
825 * phase of rebuilding is done */
826 __le32 s_hash_function_code; /* indicates which hash function is being used
827 * to sort names in a directory */
828 __le16 s_tree_height; /* height of disk tree */
829 __le16 s_bmap_nr; /* amount of bitmap blocks needed to address
830 * each block of file system */
831 __le16 s_version; /* this field is only reliable on filesystem
832 * with non-standard journal */
833 __le16 s_reserved_for_journal; /* size in blocks of journal area on main
834 * device, we need to keep after
835 * making fs with non-standard journal */
836 } __attribute__ ((__packed__));
837
838 #define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
839
840 /* this is the on disk super block */
841 struct reiserfs_super_block {
842 struct reiserfs_super_block_v1 s_v1;
843 __le32 s_inode_generation;
844 __le32 s_flags; /* Right now used only by inode-attributes, if enabled */
845 unsigned char s_uuid[16]; /* filesystem unique identifier */
846 unsigned char s_label[16]; /* filesystem volume label */
847 __le16 s_mnt_count; /* Count of mounts since last fsck */
848 __le16 s_max_mnt_count; /* Maximum mounts before check */
849 __le32 s_lastcheck; /* Timestamp of last fsck */
850 __le32 s_check_interval; /* Interval between checks */
851 char s_unused[76]; /* zero filled by mkreiserfs and
852 * reiserfs_convert_objectid_map_v1()
853 * so any additions must be updated
854 * there as well. */
855 } __attribute__ ((__packed__));
856
857 #define SB_SIZE (sizeof(struct reiserfs_super_block))
858
859 #define REISERFS_VERSION_1 0
860 #define REISERFS_VERSION_2 2
861
862 // on-disk super block fields converted to cpu form
863 #define SB_DISK_SUPER_BLOCK(s) (REISERFS_SB(s)->s_rs)
864 #define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
865 #define SB_BLOCKSIZE(s) \
866 le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_blocksize))
867 #define SB_BLOCK_COUNT(s) \
868 le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_block_count))
869 #define SB_FREE_BLOCKS(s) \
870 le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks))
871 #define SB_REISERFS_MAGIC(s) \
872 (SB_V1_DISK_SUPER_BLOCK(s)->s_magic)
873 #define SB_ROOT_BLOCK(s) \
874 le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_root_block))
875 #define SB_TREE_HEIGHT(s) \
876 le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height))
877 #define SB_REISERFS_STATE(s) \
878 le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state))
879 #define SB_VERSION(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_version))
880 #define SB_BMAP_NR(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr))
881
882 #define PUT_SB_BLOCK_COUNT(s, val) \
883 do { SB_V1_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0)
884 #define PUT_SB_FREE_BLOCKS(s, val) \
885 do { SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0)
886 #define PUT_SB_ROOT_BLOCK(s, val) \
887 do { SB_V1_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0)
888 #define PUT_SB_TREE_HEIGHT(s, val) \
889 do { SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0)
890 #define PUT_SB_REISERFS_STATE(s, val) \
891 do { SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state = cpu_to_le16(val); } while (0)
892 #define PUT_SB_VERSION(s, val) \
893 do { SB_V1_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0)
894 #define PUT_SB_BMAP_NR(s, val) \
895 do { SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0)
896
897 #define SB_ONDISK_JP(s) (&SB_V1_DISK_SUPER_BLOCK(s)->s_journal)
898 #define SB_ONDISK_JOURNAL_SIZE(s) \
899 le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_size))
900 #define SB_ONDISK_JOURNAL_1st_BLOCK(s) \
901 le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_1st_block))
902 #define SB_ONDISK_JOURNAL_DEVICE(s) \
903 le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev))
904 #define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \
905 le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal))
906
907 #define is_block_in_log_or_reserved_area(s, block) \
908 block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \
909 && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) + \
910 ((!is_reiserfs_jr(SB_DISK_SUPER_BLOCK(s)) ? \
911 SB_ONDISK_JOURNAL_SIZE(s) + 1 : SB_ONDISK_RESERVED_FOR_JOURNAL(s)))
912
913 int is_reiserfs_3_5(struct reiserfs_super_block *rs);
914 int is_reiserfs_3_6(struct reiserfs_super_block *rs);
915 int is_reiserfs_jr(struct reiserfs_super_block *rs);
916
917 /* ReiserFS leaves the first 64k unused, so that partition labels have
918 enough space. If someone wants to write a fancy bootloader that
919 needs more than 64k, let us know, and this will be increased in size.
920 This number must be larger than the largest block size on any
921 platform, or code will break. -Hans */
922 #define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
923 #define REISERFS_FIRST_BLOCK unused_define
924 #define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
925
926 /* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */
927 #define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
928
929 /* reiserfs internal error codes (used by search_by_key and fix_nodes) */
930 #define CARRY_ON 0
931 #define REPEAT_SEARCH -1
932 #define IO_ERROR -2
933 #define NO_DISK_SPACE -3
934 #define NO_BALANCING_NEEDED (-4)
935 #define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5)
936 #define QUOTA_EXCEEDED -6
937
938 typedef __u32 b_blocknr_t;
939 typedef __le32 unp_t;
940
941 struct unfm_nodeinfo {
942 unp_t unfm_nodenum;
943 unsigned short unfm_freespace;
944 };
945
946 /* there are two formats of keys: 3.5 and 3.6
947 */
948 #define KEY_FORMAT_3_5 0
949 #define KEY_FORMAT_3_6 1
950
951 /* there are two stat datas */
952 #define STAT_DATA_V1 0
953 #define STAT_DATA_V2 1
954
955 static inline struct reiserfs_inode_info *REISERFS_I(const struct inode *inode)
956 {
957 return container_of(inode, struct reiserfs_inode_info, vfs_inode);
958 }
959
960 static inline struct reiserfs_sb_info *REISERFS_SB(const struct super_block *sb)
961 {
962 return sb->s_fs_info;
963 }
964
965 /* Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
966 * which overflows on large file systems. */
967 static inline __u32 reiserfs_bmap_count(struct super_block *sb)
968 {
969 return (SB_BLOCK_COUNT(sb) - 1) / (sb->s_blocksize * 8) + 1;
970 }
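/*
 * Worked example (illustrative): with a 4096-byte block size one bitmap
 * block maps 4096 * 8 = 32768 blocks, so a filesystem of 1,000,000 blocks
 * needs (1000000 - 1) / 32768 + 1 = 31 bitmap blocks, which is why the
 * count is recomputed here instead of trusting the 16-bit s_bmap_nr.
 */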
971
972 static inline int bmap_would_wrap(unsigned bmap_nr)
973 {
974 return bmap_nr > ((1LL << 16) - 1);
975 }
976
977 /** this gives the version of the key format used by all items (except
978 stat data) the object consists of */
979 #define get_inode_item_key_version( inode ) \
980 ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
981
982 #define set_inode_item_key_version( inode, version ) \
983 ({ if((version)==KEY_FORMAT_3_6) \
984 REISERFS_I(inode)->i_flags |= i_item_key_version_mask; \
985 else \
986 REISERFS_I(inode)->i_flags &= ~i_item_key_version_mask; })
987
988 #define get_inode_sd_version(inode) \
989 ((REISERFS_I(inode)->i_flags & i_stat_data_version_mask) ? STAT_DATA_V2 : STAT_DATA_V1)
990
991 #define set_inode_sd_version(inode, version) \
992 ({ if((version)==STAT_DATA_V2) \
993 REISERFS_I(inode)->i_flags |= i_stat_data_version_mask; \
994 else \
995 REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
996
997 /* This is an aggressive tail suppression policy, I am hoping it
998 improves our benchmarks. The principle behind it is that percentage
999 space saving is what matters, not absolute space saving. This is
1000 non-intuitive, but it helps to understand it if you consider that the
1001 cost to access 4 blocks is not much more than the cost to access 1
1002 block, if you have to do a seek and rotate. A tail risks a
1003 non-linear disk access that is significant as a percentage of total
1004 time cost for a 4 block file and saves an amount of space that is
1005 less significant as a percentage of space, or so goes the hypothesis.
1006 -Hans */
1007 #define STORE_TAIL_IN_UNFM_S1(n_file_size,n_tail_size,n_block_size) \
1008 (\
1009 (!(n_tail_size)) || \
1010 (((n_tail_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) || \
1011 ( (n_file_size) >= (n_block_size) * 4 ) || \
1012 ( ( (n_file_size) >= (n_block_size) * 3 ) && \
1013 ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/4) ) || \
1014 ( ( (n_file_size) >= (n_block_size) * 2 ) && \
1015 ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size))/2) ) || \
1016 ( ( (n_file_size) >= (n_block_size) ) && \
1017 ( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
1018 )
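/*
 * Worked example (illustrative, 4k blocks): a file of at least 4 blocks
 * (>= 16384 bytes) is always pushed into unformatted nodes regardless of
 * tail size, while a 5000-byte file with a ~900-byte tail keeps the tail,
 * because 900 bytes is well under 3/4 of the maximal direct item length.
 * The tail only moves out of the tree once it stops saving a significant
 * percentage of the file's space, as the comment above explains.
 */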
1019
1020 /* Another strategy for tails, this one means only create a tail if all the
1021 file would fit into one DIRECT item.
1022 Primary intention for this one is to increase performance by decreasing
1023 seeking.
1024 */
1025 #define STORE_TAIL_IN_UNFM_S2(n_file_size,n_tail_size,n_block_size) \
1026 (\
1027 (!(n_tail_size)) || \
1028 (((n_file_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) ) \
1029 )
1030
1031 /*
1032 * values for s_umount_state field
1033 */
1034 #define REISERFS_VALID_FS 1
1035 #define REISERFS_ERROR_FS 2
1036
1037 //
1038 // there are 5 item types currently
1039 //
1040 #define TYPE_STAT_DATA 0
1041 #define TYPE_INDIRECT 1
1042 #define TYPE_DIRECT 2
1043 #define TYPE_DIRENTRY 3
1044 #define TYPE_MAXTYPE 3
1045 #define TYPE_ANY 15 // FIXME: comment is required
1046
1047 /***************************************************************************/
1048 /* KEY & ITEM HEAD */
1049 /***************************************************************************/
1050
1051 //
1052 // directories use this key as well as old files
1053 //
1054 struct offset_v1 {
1055 __le32 k_offset;
1056 __le32 k_uniqueness;
1057 } __attribute__ ((__packed__));
1058
1059 struct offset_v2 {
1060 __le64 v;
1061 } __attribute__ ((__packed__));
1062
1063 static inline __u16 offset_v2_k_type(const struct offset_v2 *v2)
1064 {
1065 __u8 type = le64_to_cpu(v2->v) >> 60;
1066 return (type <= TYPE_MAXTYPE) ? type : TYPE_ANY;
1067 }
1068
1069 static inline void set_offset_v2_k_type(struct offset_v2 *v2, int type)
1070 {
1071 v2->v =
1072 (v2->v & cpu_to_le64(~0ULL >> 4)) | cpu_to_le64((__u64) type << 60);
1073 }
1074
1075 static inline loff_t offset_v2_k_offset(const struct offset_v2 *v2)
1076 {
1077 return le64_to_cpu(v2->v) & (~0ULL >> 4);
1078 }
1079
1080 static inline void set_offset_v2_k_offset(struct offset_v2 *v2, loff_t offset)
1081 {
1082 offset &= (~0ULL >> 4);
1083 v2->v = (v2->v & cpu_to_le64(15ULL << 60)) | cpu_to_le64(offset);
1084 }
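/*
 * Illustrative sketch (not part of this header): the v2 offset packs the
 * item type into the top 4 bits of the little-endian 64-bit word and the
 * byte offset into the low 60 bits, so for example:
 *
 *	struct offset_v2 v2;
 *
 *	v2.v = 0;
 *	set_offset_v2_k_type(&v2, TYPE_INDIRECT);
 *	set_offset_v2_k_offset(&v2, 4097);
 *	// offset_v2_k_type(&v2) == TYPE_INDIRECT
 *	// offset_v2_k_offset(&v2) == 4097
 */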
1085
1086 /* Key of an item determines its location in the S+tree, and
1087 is composed of 4 components */
1088 struct reiserfs_key {
1089 __le32 k_dir_id; /* packing locality: by default parent
1090 directory object id */
1091 __le32 k_objectid; /* object identifier */
1092 union {
1093 struct offset_v1 k_offset_v1;
1094 struct offset_v2 k_offset_v2;
1095 } __attribute__ ((__packed__)) u;
1096 } __attribute__ ((__packed__));
1097
1098 struct in_core_key {
1099 __u32 k_dir_id; /* packing locality: by default parent
1100 directory object id */
1101 __u32 k_objectid; /* object identifier */
1102 __u64 k_offset;
1103 __u8 k_type;
1104 };
1105
1106 struct cpu_key {
1107 struct in_core_key on_disk_key;
1108 int version;
1109 int key_length; /* 3 in all cases but direct2indirect and
1110 indirect2direct conversion */
1111 };
1112
1113 /* Our function for comparing keys can compare keys of different
1114 lengths. It takes as a parameter the length of the keys it is to
1115 compare. These defines are used in determining what is to be passed
1116 to it as that parameter. */
1117 #define REISERFS_FULL_KEY_LEN 4
1118 #define REISERFS_SHORT_KEY_LEN 2
1119
1120 /* The result of the key compare */
1121 #define FIRST_GREATER 1
1122 #define SECOND_GREATER -1
1123 #define KEYS_IDENTICAL 0
1124 #define KEY_FOUND 1
1125 #define KEY_NOT_FOUND 0
1126
1127 #define KEY_SIZE (sizeof(struct reiserfs_key))
1128 #define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
1129
1130 /* return values for search_by_key and clones */
1131 #define ITEM_FOUND 1
1132 #define ITEM_NOT_FOUND 0
1133 #define ENTRY_FOUND 1
1134 #define ENTRY_NOT_FOUND 0
1135 #define DIRECTORY_NOT_FOUND -1
1136 #define REGULAR_FILE_FOUND -2
1137 #define DIRECTORY_FOUND -3
1138 #define BYTE_FOUND 1
1139 #define BYTE_NOT_FOUND 0
1140 #define FILE_NOT_FOUND -1
1141
1142 #define POSITION_FOUND 1
1143 #define POSITION_NOT_FOUND 0
1144
1145 // return values for reiserfs_find_entry and search_by_entry_key
1146 #define NAME_FOUND 1
1147 #define NAME_NOT_FOUND 0
1148 #define GOTO_PREVIOUS_ITEM 2
1149 #define NAME_FOUND_INVISIBLE 3
1150
1151 /* Everything in the filesystem is stored as a set of items. The
1152 item head contains the key of the item, its free space (for
1153 indirect items) and specifies the location of the item itself
1154 within the block. */
1155
1156 struct item_head {
1157 /* Everything in the tree is found by searching for it based on
1158 * its key.*/
1159 struct reiserfs_key ih_key;
1160 union {
1161 /* The free space in the last unformatted node of an
1162 indirect item if this is an indirect item. This
1163 equals 0xFFFF iff this is a direct item or stat data
1164 item. Note that the key, not this field, is used to
1165 determine the item type, and thus which field this
1166 union contains. */
1167 __le16 ih_free_space_reserved;
1168 /* Iff this is a directory item, this field equals the
1169 number of directory entries in the directory item. */
1170 __le16 ih_entry_count;
1171 } __attribute__ ((__packed__)) u;
1172 __le16 ih_item_len; /* total size of the item body */
1173 __le16 ih_item_location; /* an offset to the item body
1174 * within the block */
1175 __le16 ih_version; /* 0 for all old items, 2 for new
1176 ones. Highest bit is set by fsck
1177 temporary, cleaned after all
1178 done */
1179 } __attribute__ ((__packed__));
1180 /* size of item header */
1181 #define IH_SIZE (sizeof(struct item_head))
1182
1183 #define ih_free_space(ih) le16_to_cpu((ih)->u.ih_free_space_reserved)
1184 #define ih_version(ih) le16_to_cpu((ih)->ih_version)
1185 #define ih_entry_count(ih) le16_to_cpu((ih)->u.ih_entry_count)
1186 #define ih_location(ih) le16_to_cpu((ih)->ih_item_location)
1187 #define ih_item_len(ih) le16_to_cpu((ih)->ih_item_len)
1188
1189 #define put_ih_free_space(ih, val) do { (ih)->u.ih_free_space_reserved = cpu_to_le16(val); } while(0)
1190 #define put_ih_version(ih, val) do { (ih)->ih_version = cpu_to_le16(val); } while (0)
1191 #define put_ih_entry_count(ih, val) do { (ih)->u.ih_entry_count = cpu_to_le16(val); } while (0)
1192 #define put_ih_location(ih, val) do { (ih)->ih_item_location = cpu_to_le16(val); } while (0)
1193 #define put_ih_item_len(ih, val) do { (ih)->ih_item_len = cpu_to_le16(val); } while (0)
1194
1195 #define unreachable_item(ih) (ih_version(ih) & (1 << 15))
1196
1197 #define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
1198 #define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
1199
1200 /* these operate on indirect items, where you've got an array of ints
1201 ** at a possibly unaligned location. These are a noop on ia32
1202 **
1203 ** p is the array of __u32, i is the index into the array, v is the value
1204 ** to store there.
1205 */
1206 #define get_block_num(p, i) get_unaligned_le32((p) + (i))
1207 #define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
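/*
 * Illustrative sketch (not part of this header): the body of an indirect
 * item is an array of little-endian block numbers that may be unaligned,
 * so walking it looks roughly like the following (ih_item_body and
 * process_block are hypothetical names used only for the example):
 *
 *	__le32 *unfm = (__le32 *)ih_item_body;
 *	int i;
 *
 *	for (i = 0; i < ih_item_len(ih) / sizeof(unp_t); i++)
 *		process_block(get_block_num(unfm, i));
 */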
1208
1209 //
1210 // in old version uniqueness field shows key type
1211 //
1212 #define V1_SD_UNIQUENESS 0
1213 #define V1_INDIRECT_UNIQUENESS 0xfffffffe
1214 #define V1_DIRECT_UNIQUENESS 0xffffffff
1215 #define V1_DIRENTRY_UNIQUENESS 500
1216 #define V1_ANY_UNIQUENESS 555 // FIXME: comment is required
1217
1218 //
1219 // here are conversion routines
1220 //
1221 static inline int uniqueness2type(__u32 uniqueness) CONSTF;
1222 static inline int uniqueness2type(__u32 uniqueness)
1223 {
1224 switch ((int)uniqueness) {
1225 case V1_SD_UNIQUENESS:
1226 return TYPE_STAT_DATA;
1227 case V1_INDIRECT_UNIQUENESS:
1228 return TYPE_INDIRECT;
1229 case V1_DIRECT_UNIQUENESS:
1230 return TYPE_DIRECT;
1231 case V1_DIRENTRY_UNIQUENESS:
1232 return TYPE_DIRENTRY;
1233 case V1_ANY_UNIQUENESS:
1234 default:
1235 return TYPE_ANY;
1236 }
1237 }
1238
1239 static inline __u32 type2uniqueness(int type) CONSTF;
1240 static inline __u32 type2uniqueness(int type)
1241 {
1242 switch (type) {
1243 case TYPE_STAT_DATA:
1244 return V1_SD_UNIQUENESS;
1245 case TYPE_INDIRECT:
1246 return V1_INDIRECT_UNIQUENESS;
1247 case TYPE_DIRECT:
1248 return V1_DIRECT_UNIQUENESS;
1249 case TYPE_DIRENTRY:
1250 return V1_DIRENTRY_UNIQUENESS;
1251 case TYPE_ANY:
1252 default:
1253 return V1_ANY_UNIQUENESS;
1254 }
1255 }
1256
1257 //
1258 // key is pointer to on disk key which is stored in le, result is cpu,
1259 // there is no way to get version of object from key, so, provide
1260 // version to these defines
1261 //
1262 static inline loff_t le_key_k_offset(int version,
1263 const struct reiserfs_key *key)
1264 {
1265 return (version == KEY_FORMAT_3_5) ?
1266 le32_to_cpu(key->u.k_offset_v1.k_offset) :
1267 offset_v2_k_offset(&(key->u.k_offset_v2));
1268 }
1269
1270 static inline loff_t le_ih_k_offset(const struct item_head *ih)
1271 {
1272 return le_key_k_offset(ih_version(ih), &(ih->ih_key));
1273 }
1274
1275 static inline loff_t le_key_k_type(int version, const struct reiserfs_key *key)
1276 {
1277 return (version == KEY_FORMAT_3_5) ?
1278 uniqueness2type(le32_to_cpu(key->u.k_offset_v1.k_uniqueness)) :
1279 offset_v2_k_type(&(key->u.k_offset_v2));
1280 }
1281
1282 static inline loff_t le_ih_k_type(const struct item_head *ih)
1283 {
1284 return le_key_k_type(ih_version(ih), &(ih->ih_key));
1285 }
1286
1287 static inline void set_le_key_k_offset(int version, struct reiserfs_key *key,
1288 loff_t offset)
1289 {
1290 (version == KEY_FORMAT_3_5) ? (void)(key->u.k_offset_v1.k_offset = cpu_to_le32(offset)) : /* jdm check */
1291 (void)(set_offset_v2_k_offset(&(key->u.k_offset_v2), offset));
1292 }
1293
1294 static inline void set_le_ih_k_offset(struct item_head *ih, loff_t offset)
1295 {
1296 set_le_key_k_offset(ih_version(ih), &(ih->ih_key), offset);
1297 }
1298
1299 static inline void set_le_key_k_type(int version, struct reiserfs_key *key,
1300 int type)
1301 {
1302 (version == KEY_FORMAT_3_5) ?
1303 (void)(key->u.k_offset_v1.k_uniqueness =
1304 cpu_to_le32(type2uniqueness(type)))
1305 : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type));
1306 }
1307
1308 static inline void set_le_ih_k_type(struct item_head *ih, int type)
1309 {
1310 set_le_key_k_type(ih_version(ih), &(ih->ih_key), type);
1311 }
1312
1313 static inline int is_direntry_le_key(int version, struct reiserfs_key *key)
1314 {
1315 return le_key_k_type(version, key) == TYPE_DIRENTRY;
1316 }
1317
1318 static inline int is_direct_le_key(int version, struct reiserfs_key *key)
1319 {
1320 return le_key_k_type(version, key) == TYPE_DIRECT;
1321 }
1322
1323 static inline int is_indirect_le_key(int version, struct reiserfs_key *key)
1324 {
1325 return le_key_k_type(version, key) == TYPE_INDIRECT;
1326 }
1327
1328 static inline int is_statdata_le_key(int version, struct reiserfs_key *key)
1329 {
1330 return le_key_k_type(version, key) == TYPE_STAT_DATA;
1331 }
1332
1333 //
1334 // item header has version.
1335 //
1336 static inline int is_direntry_le_ih(struct item_head *ih)
1337 {
1338 return is_direntry_le_key(ih_version(ih), &ih->ih_key);
1339 }
1340
1341 static inline int is_direct_le_ih(struct item_head *ih)
1342 {
1343 return is_direct_le_key(ih_version(ih), &ih->ih_key);
1344 }
1345
1346 static inline int is_indirect_le_ih(struct item_head *ih)
1347 {
1348 return is_indirect_le_key(ih_version(ih), &ih->ih_key);
1349 }
1350
1351 static inline int is_statdata_le_ih(struct item_head *ih)
1352 {
1353 return is_statdata_le_key(ih_version(ih), &ih->ih_key);
1354 }
1355
1356 //
1357 // key is pointer to cpu key, result is cpu
1358 //
1359 static inline loff_t cpu_key_k_offset(const struct cpu_key *key)
1360 {
1361 return key->on_disk_key.k_offset;
1362 }
1363
1364 static inline loff_t cpu_key_k_type(const struct cpu_key *key)
1365 {
1366 return key->on_disk_key.k_type;
1367 }
1368
1369 static inline void set_cpu_key_k_offset(struct cpu_key *key, loff_t offset)
1370 {
1371 key->on_disk_key.k_offset = offset;
1372 }
1373
1374 static inline void set_cpu_key_k_type(struct cpu_key *key, int type)
1375 {
1376 key->on_disk_key.k_type = type;
1377 }
1378
1379 static inline void cpu_key_k_offset_dec(struct cpu_key *key)
1380 {
1381 key->on_disk_key.k_offset--;
1382 }
1383
1384 #define is_direntry_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRENTRY)
1385 #define is_direct_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRECT)
1386 #define is_indirect_cpu_key(key) (cpu_key_k_type (key) == TYPE_INDIRECT)
1387 #define is_statdata_cpu_key(key) (cpu_key_k_type (key) == TYPE_STAT_DATA)
1388
1389 /* are these used ? */
1390 #define is_direntry_cpu_ih(ih) (is_direntry_cpu_key (&((ih)->ih_key)))
1391 #define is_direct_cpu_ih(ih) (is_direct_cpu_key (&((ih)->ih_key)))
1392 #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
1393 #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
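
/*
 * Minimal sketch of how the cpu key helpers above are normally used
 * (make_cpu_key() is declared near the end of this header; the key length
 * of 3 is only an example):
 *
 *	struct cpu_key key;
 *
 *	make_cpu_key(&key, inode, offset, TYPE_DIRECT, 3);
 *	set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + 1);
 *	if (is_direct_cpu_key(&key))
 *		;	// the key now addresses the next byte of the file body
 */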
1394
1395 #define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \
1396 (!COMP_SHORT_KEYS(ih, key) && \
1397 I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize))
1398
1399 /* maximal length of item */
1400 #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
1401 #define MIN_ITEM_LEN 1
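
/* Worked example: with a 4096 byte block, the 24 byte block head defined
   below and a 24 byte item head (IH_SIZE is defined elsewhere in this
   header; 24 bytes is the usual value), MAX_ITEM_LEN(4096) comes out to
   4096 - 24 - 24 = 4048 bytes. */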
1402
1403 /* object identifier for root dir */
1404 #define REISERFS_ROOT_OBJECTID 2
1405 #define REISERFS_ROOT_PARENT_OBJECTID 1
1406
1407 extern struct reiserfs_key root_key;
1408
1409 /*
1410 * Picture represents a leaf of the S+tree
1411 * ______________________________________________________
1412 * | | Array of | | |
1413 * |Block | Object-Item | F r e e | Objects- |
1414 * | head | Headers | S p a c e | Items |
1415 * |______|_______________|___________________|___________|
1416 */
1417
1418 /* Header of a disk block. More precisely, header of a formatted leaf
1419 or internal node, and not the header of an unformatted node. */
1420 struct block_head {
1421 __le16 blk_level; /* Level of a block in the tree. */
1422 __le16 blk_nr_item; /* Number of keys/items in a block. */
1423 __le16 blk_free_space; /* Block free space in bytes. */
1424 __le16 blk_reserved;
1425 /* dump this in v4/planA */
1426 struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */
1427 };
1428
1429 #define BLKH_SIZE (sizeof(struct block_head))
1430 #define blkh_level(p_blkh) (le16_to_cpu((p_blkh)->blk_level))
1431 #define blkh_nr_item(p_blkh) (le16_to_cpu((p_blkh)->blk_nr_item))
1432 #define blkh_free_space(p_blkh) (le16_to_cpu((p_blkh)->blk_free_space))
1433 #define blkh_reserved(p_blkh) (le16_to_cpu((p_blkh)->blk_reserved))
1434 #define set_blkh_level(p_blkh,val) ((p_blkh)->blk_level = cpu_to_le16(val))
1435 #define set_blkh_nr_item(p_blkh,val) ((p_blkh)->blk_nr_item = cpu_to_le16(val))
1436 #define set_blkh_free_space(p_blkh,val) ((p_blkh)->blk_free_space = cpu_to_le16(val))
1437 #define set_blkh_reserved(p_blkh,val) ((p_blkh)->blk_reserved = cpu_to_le16(val))
1438 #define blkh_right_delim_key(p_blkh) ((p_blkh)->blk_right_delim_key)
1439 #define set_blkh_right_delim_key(p_blkh,val) ((p_blkh)->blk_right_delim_key = val)
1440
1441 /*
1442 * values for blk_level field of the struct block_head
1443 */
1444
1445 #define FREE_LEVEL 0 /* when node gets removed from the tree its
1446 blk_level is set to FREE_LEVEL. It is then
1447 used to see whether the node is still in the
1448 tree */
1449
1450 #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */
1451
1452 /* Given the buffer head of a formatted node, resolve to the block head of that node. */
1453 #define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data))
1454 /* Number of items that are in buffer. */
1455 #define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh)))
1456 #define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh)))
1457 #define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh)))
1458
1459 #define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0)
1460 #define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0)
1461 #define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0)
1462
1463 /* Get right delimiting key. -- little endian */
1464 #define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh))))
1465
1466 /* Does the buffer contain a disk leaf. */
1467 #define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL)
1468
1469 /* Does the buffer contain a disk internal node */
1470 #define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
1471 && B_LEVEL(bh) <= MAX_HEIGHT)
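
/*
 * Illustrative sketch: given a buffer holding a formatted node (e.g. read
 * with reiserfs_bread()), the macros above are how callers tell leaves and
 * internal nodes apart:
 *
 *	if (B_IS_ITEMS_LEVEL(bh))
 *		nr = B_NR_ITEMS(bh);	// leaf: nr item headers + bodies
 *	else if (B_IS_KEYS_LEVEL(bh))
 *		nr = B_NR_ITEMS(bh);	// internal: nr keys, nr + 1 children
 *	else
 *		;	// FREE_LEVEL or garbage: node is not (or no longer) in the tree
 */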
1472
1473 /***************************************************************************/
1474 /* STAT DATA */
1475 /***************************************************************************/
1476
1477 //
1478 // old stat data is 32 bytes long. We distinguish the new one by its
1479 // different size
1480 //
1481 struct stat_data_v1 {
1482 __le16 sd_mode; /* file type, permissions */
1483 __le16 sd_nlink; /* number of hard links */
1484 __le16 sd_uid; /* owner */
1485 __le16 sd_gid; /* group */
1486 __le32 sd_size; /* file size */
1487 __le32 sd_atime; /* time of last access */
1488 __le32 sd_mtime; /* time file was last modified */
1489 __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
1490 union {
1491 __le32 sd_rdev;
1492 __le32 sd_blocks; /* number of blocks file uses */
1493 } __attribute__ ((__packed__)) u;
1494 __le32 sd_first_direct_byte; /* first byte of file which is stored
1495 in a direct item: except that if it
1496 equals 1 it is a symlink and if it
1497 equals ~(__u32)0 there is no
1498 direct item. The existence of this
1499 field really grates on me. Let's
1500 replace it with a macro based on
1501 sd_size and our tail suppression
1502 policy. Someday. -Hans */
1503 } __attribute__ ((__packed__));
1504
1505 #define SD_V1_SIZE (sizeof(struct stat_data_v1))
1506 #define stat_data_v1(ih) (ih_version (ih) == KEY_FORMAT_3_5)
1507 #define sd_v1_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
1508 #define set_sd_v1_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
1509 #define sd_v1_nlink(sdp) (le16_to_cpu((sdp)->sd_nlink))
1510 #define set_sd_v1_nlink(sdp,v) ((sdp)->sd_nlink = cpu_to_le16(v))
1511 #define sd_v1_uid(sdp) (le16_to_cpu((sdp)->sd_uid))
1512 #define set_sd_v1_uid(sdp,v) ((sdp)->sd_uid = cpu_to_le16(v))
1513 #define sd_v1_gid(sdp) (le16_to_cpu((sdp)->sd_gid))
1514 #define set_sd_v1_gid(sdp,v) ((sdp)->sd_gid = cpu_to_le16(v))
1515 #define sd_v1_size(sdp) (le32_to_cpu((sdp)->sd_size))
1516 #define set_sd_v1_size(sdp,v) ((sdp)->sd_size = cpu_to_le32(v))
1517 #define sd_v1_atime(sdp) (le32_to_cpu((sdp)->sd_atime))
1518 #define set_sd_v1_atime(sdp,v) ((sdp)->sd_atime = cpu_to_le32(v))
1519 #define sd_v1_mtime(sdp) (le32_to_cpu((sdp)->sd_mtime))
1520 #define set_sd_v1_mtime(sdp,v) ((sdp)->sd_mtime = cpu_to_le32(v))
1521 #define sd_v1_ctime(sdp) (le32_to_cpu((sdp)->sd_ctime))
1522 #define set_sd_v1_ctime(sdp,v) ((sdp)->sd_ctime = cpu_to_le32(v))
1523 #define sd_v1_rdev(sdp) (le32_to_cpu((sdp)->u.sd_rdev))
1524 #define set_sd_v1_rdev(sdp,v) ((sdp)->u.sd_rdev = cpu_to_le32(v))
1525 #define sd_v1_blocks(sdp) (le32_to_cpu((sdp)->u.sd_blocks))
1526 #define set_sd_v1_blocks(sdp,v) ((sdp)->u.sd_blocks = cpu_to_le32(v))
1527 #define sd_v1_first_direct_byte(sdp) \
1528 (le32_to_cpu((sdp)->sd_first_direct_byte))
1529 #define set_sd_v1_first_direct_byte(sdp,v) \
1530 ((sdp)->sd_first_direct_byte = cpu_to_le32(v))
1531
1532 /* inode flags stored in sd_attrs (nee sd_reserved) */
1533
1534 /* we want common flags to have the same values as in ext2,
1535 so chattr(1) will work without problems */
1536 #define REISERFS_IMMUTABLE_FL FS_IMMUTABLE_FL
1537 #define REISERFS_APPEND_FL FS_APPEND_FL
1538 #define REISERFS_SYNC_FL FS_SYNC_FL
1539 #define REISERFS_NOATIME_FL FS_NOATIME_FL
1540 #define REISERFS_NODUMP_FL FS_NODUMP_FL
1541 #define REISERFS_SECRM_FL FS_SECRM_FL
1542 #define REISERFS_UNRM_FL FS_UNRM_FL
1543 #define REISERFS_COMPR_FL FS_COMPR_FL
1544 #define REISERFS_NOTAIL_FL FS_NOTAIL_FL
1545
1546 /* persistent flags that file inherits from the parent directory */
1547 #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL | \
1548 REISERFS_SYNC_FL | \
1549 REISERFS_NOATIME_FL | \
1550 REISERFS_NODUMP_FL | \
1551 REISERFS_SECRM_FL | \
1552 REISERFS_COMPR_FL | \
1553 REISERFS_NOTAIL_FL )
1554
1555 /* Stat Data on disk (reiserfs version of UFS disk inode minus the
1556 address blocks) */
1557 struct stat_data {
1558 __le16 sd_mode; /* file type, permissions */
1559 __le16 sd_attrs; /* persistent inode flags */
1560 __le32 sd_nlink; /* number of hard links */
1561 __le64 sd_size; /* file size */
1562 __le32 sd_uid; /* owner */
1563 __le32 sd_gid; /* group */
1564 __le32 sd_atime; /* time of last access */
1565 __le32 sd_mtime; /* time file was last modified */
1566 __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
1567 __le32 sd_blocks;
1568 union {
1569 __le32 sd_rdev;
1570 __le32 sd_generation;
1571 //__le32 sd_first_direct_byte;
1572 /* first byte of file which is stored in a
1573 direct item: except that if it equals 1
1574 it is a symlink and if it equals
1575 ~(__u32)0 there is no direct item. The
1576 existence of this field really grates
1577 on me. Let's replace it with a macro
1578 based on sd_size and our tail
1579 suppression policy? */
1580 } __attribute__ ((__packed__)) u;
1581 } __attribute__ ((__packed__));
1582 //
1583 // this is 44 bytes long
1584 //
1585 #define SD_SIZE (sizeof(struct stat_data))
1586 #define SD_V2_SIZE SD_SIZE
1587 #define stat_data_v2(ih) (ih_version (ih) == KEY_FORMAT_3_6)
1588 #define sd_v2_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
1589 #define set_sd_v2_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
1590 /* sd_reserved */
1591 /* set_sd_reserved */
1592 #define sd_v2_nlink(sdp) (le32_to_cpu((sdp)->sd_nlink))
1593 #define set_sd_v2_nlink(sdp,v) ((sdp)->sd_nlink = cpu_to_le32(v))
1594 #define sd_v2_size(sdp) (le64_to_cpu((sdp)->sd_size))
1595 #define set_sd_v2_size(sdp,v) ((sdp)->sd_size = cpu_to_le64(v))
1596 #define sd_v2_uid(sdp) (le32_to_cpu((sdp)->sd_uid))
1597 #define set_sd_v2_uid(sdp,v) ((sdp)->sd_uid = cpu_to_le32(v))
1598 #define sd_v2_gid(sdp) (le32_to_cpu((sdp)->sd_gid))
1599 #define set_sd_v2_gid(sdp,v) ((sdp)->sd_gid = cpu_to_le32(v))
1600 #define sd_v2_atime(sdp) (le32_to_cpu((sdp)->sd_atime))
1601 #define set_sd_v2_atime(sdp,v) ((sdp)->sd_atime = cpu_to_le32(v))
1602 #define sd_v2_mtime(sdp) (le32_to_cpu((sdp)->sd_mtime))
1603 #define set_sd_v2_mtime(sdp,v) ((sdp)->sd_mtime = cpu_to_le32(v))
1604 #define sd_v2_ctime(sdp) (le32_to_cpu((sdp)->sd_ctime))
1605 #define set_sd_v2_ctime(sdp,v) ((sdp)->sd_ctime = cpu_to_le32(v))
1606 #define sd_v2_blocks(sdp) (le32_to_cpu((sdp)->sd_blocks))
1607 #define set_sd_v2_blocks(sdp,v) ((sdp)->sd_blocks = cpu_to_le32(v))
1608 #define sd_v2_rdev(sdp) (le32_to_cpu((sdp)->u.sd_rdev))
1609 #define set_sd_v2_rdev(sdp,v) ((sdp)->u.sd_rdev = cpu_to_le32(v))
1610 #define sd_v2_generation(sdp) (le32_to_cpu((sdp)->u.sd_generation))
1611 #define set_sd_v2_generation(sdp,v) ((sdp)->u.sd_generation = cpu_to_le32(v))
1612 #define sd_v2_attrs(sdp) (le16_to_cpu((sdp)->sd_attrs))
1613 #define set_sd_v2_attrs(sdp,v) ((sdp)->sd_attrs = cpu_to_le16(v))
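
/*
 * Rough sketch (not the actual inode.c code) of how the v2 mutators above
 * are used when copying an in-core inode into an on-disk stat data item;
 * 'uid', 'gid' and 'blocks' stand for values the caller has already
 * converted to plain 32 bit numbers:
 *
 *	struct stat_data sd;
 *
 *	set_sd_v2_mode(&sd, inode->i_mode);
 *	set_sd_v2_nlink(&sd, inode->i_nlink);
 *	set_sd_v2_size(&sd, inode->i_size);
 *	set_sd_v2_uid(&sd, uid);
 *	set_sd_v2_gid(&sd, gid);
 *	set_sd_v2_mtime(&sd, inode->i_mtime.tv_sec);
 *	set_sd_v2_blocks(&sd, blocks);
 */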
1614
1615 /***************************************************************************/
1616 /* DIRECTORY STRUCTURE */
1617 /***************************************************************************/
1618 /*
1619 Picture represents the structure of directory items
1620 ________________________________________________
1621 | Array of | | | | | |
1622 | directory |N-1| N-2 | .... | 1st |0th|
1623 | entry headers | | | | | |
1624 |_______________|___|_____|________|_______|___|
1625 <---- directory entries ------>
1626
1627 First directory item has k_offset component 1. We store "." and ".."
1628 in one item, always; we never split "." and ".." into different
1629 items. This makes, among other things, the code for removing
1630 directories simpler. */
1631 #define SD_OFFSET 0
1632 #define SD_UNIQUENESS 0
1633 #define DOT_OFFSET 1
1634 #define DOT_DOT_OFFSET 2
1635 #define DIRENTRY_UNIQUENESS 500
1636
1637 /* */
1638 #define FIRST_ITEM_OFFSET 1
1639
1640 /*
1641 Q: How to get key of object pointed to by entry from entry?
1642
1643 A: Each directory entry has a header. The header's deh_dir_id and deh_objectid fields form the key
1644 of the object the entry points to */
1645
1646 /* NOT IMPLEMENTED:
1647 Directory will someday contain stat data of object */
1648
1649 struct reiserfs_de_head {
1650 __le32 deh_offset; /* third component of the directory entry key */
1651 __le32 deh_dir_id; /* objectid of the parent directory of the object, that is referenced
1652 by directory entry */
1653 __le32 deh_objectid; /* objectid of the object, that is referenced by directory entry */
1654 __le16 deh_location; /* offset of name in the whole item */
1655 __le16 deh_state; /* whether 1) entry contains stat data (for future), and 2) whether
1656 entry is hidden (unlinked) */
1657 } __attribute__ ((__packed__));
1658 #define DEH_SIZE sizeof(struct reiserfs_de_head)
1659 #define deh_offset(p_deh) (le32_to_cpu((p_deh)->deh_offset))
1660 #define deh_dir_id(p_deh) (le32_to_cpu((p_deh)->deh_dir_id))
1661 #define deh_objectid(p_deh) (le32_to_cpu((p_deh)->deh_objectid))
1662 #define deh_location(p_deh) (le16_to_cpu((p_deh)->deh_location))
1663 #define deh_state(p_deh) (le16_to_cpu((p_deh)->deh_state))
1664
1665 #define put_deh_offset(p_deh,v) ((p_deh)->deh_offset = cpu_to_le32((v)))
1666 #define put_deh_dir_id(p_deh,v) ((p_deh)->deh_dir_id = cpu_to_le32((v)))
1667 #define put_deh_objectid(p_deh,v) ((p_deh)->deh_objectid = cpu_to_le32((v)))
1668 #define put_deh_location(p_deh,v) ((p_deh)->deh_location = cpu_to_le16((v)))
1669 #define put_deh_state(p_deh,v) ((p_deh)->deh_state = cpu_to_le16((v)))
1670
1671 /* empty directory contains two entries "." and ".." and their headers */
1672 #define EMPTY_DIR_SIZE \
1673 (DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen ("..")))
1674
1675 /* old format directories have this size when empty */
1676 #define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3)
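
/* Worked example, assuming ROUND_UP() (defined earlier in this header)
   rounds name lengths up to 8 bytes and DEH_SIZE is the 16 bytes implied by
   struct reiserfs_de_head above: EMPTY_DIR_SIZE = 2*16 + 8 + 8 = 48 bytes,
   while the old format stores "." and ".." unpadded, so
   EMPTY_DIR_SIZE_V1 = 2*16 + 1 + 2 = 35 bytes. */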
1677
1678 #define DEH_Statdata 0 /* not used now */
1679 #define DEH_Visible 2
1680
1681 /* 64 bit systems (and the S/390) need to be aligned explicitly -jdm */
1682 #if BITS_PER_LONG == 64 || defined(__s390__) || defined(__hppa__)
1683 # define ADDR_UNALIGNED_BITS (3)
1684 #endif
1685
1686 /* These are only used to manipulate deh_state.
1687 * Because of this, we'll use the ext2_ bit routines,
1688 * since they are little endian */
1689 #ifdef ADDR_UNALIGNED_BITS
1690
1691 # define aligned_address(addr) ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1)))
1692 # define unaligned_offset(addr) (((int)((long)(addr) & ((1 << ADDR_UNALIGNED_BITS) - 1))) << 3)
1693
1694 # define set_bit_unaligned(nr, addr) \
1695 __test_and_set_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1696 # define clear_bit_unaligned(nr, addr) \
1697 __test_and_clear_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1698 # define test_bit_unaligned(nr, addr) \
1699 test_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1700
1701 #else
1702
1703 # define set_bit_unaligned(nr, addr) __test_and_set_bit_le(nr, addr)
1704 # define clear_bit_unaligned(nr, addr) __test_and_clear_bit_le(nr, addr)
1705 # define test_bit_unaligned(nr, addr) test_bit_le(nr, addr)
1706
1707 #endif
1708
1709 #define mark_de_with_sd(deh) set_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1710 #define mark_de_without_sd(deh) clear_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1711 #define mark_de_visible(deh) set_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1712 #define mark_de_hidden(deh) clear_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1713
1714 #define de_with_sd(deh) test_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1715 #define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1716 #define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1717
1718 extern void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
1719 __le32 par_dirid, __le32 par_objid);
1720 extern void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
1721 __le32 par_dirid, __le32 par_objid);
1722
1723 /* array of the entry headers */
1724 /* get item body */
1725 #define B_I_PITEM(bh,ih) ( (bh)->b_data + ih_location(ih) )
1726 #define B_I_DEH(bh,ih) ((struct reiserfs_de_head *)(B_I_PITEM(bh,ih)))
1727
1728 /* length of the directory entry in directory item. This define
1729 calculates length of i-th directory entry using directory entry
1730 locations from dir entry head. When it calculates length of 0-th
1731 directory entry, it uses length of whole item in place of entry
1732 location of the non-existent following entry in the calculation.
1733 See picture above.*/
1734 /*
1735 #define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \
1736 ((i) ? (deh_location((deh)-1) - deh_location((deh))) : (ih_item_len((ih)) - deh_location((deh))))
1737 */
1738 static inline int entry_length(const struct buffer_head *bh,
1739 const struct item_head *ih, int pos_in_item)
1740 {
1741 struct reiserfs_de_head *deh;
1742
1743 deh = B_I_DEH(bh, ih) + pos_in_item;
1744 if (pos_in_item)
1745 return deh_location(deh - 1) - deh_location(deh);
1746
1747 return ih_item_len(ih) - deh_location(deh);
1748 }
1749
1750 /* number of entries in the directory item, depends on ENTRY_COUNT being at the start of directory dynamic data. */
1751 #define I_ENTRY_COUNT(ih) (ih_entry_count((ih)))
1752
1753 /* name by bh, ih and entry_num */
1754 #define B_I_E_NAME(bh,ih,entry_num) ((char *)(bh->b_data + ih_location(ih) + deh_location(B_I_DEH(bh,ih)+(entry_num))))
1755
1756 // two entries per block (at least)
1757 #define REISERFS_MAX_NAME(block_size) 255
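
/*
 * Illustrative sketch of walking the entries of one directory item with the
 * helpers above (readdir and name lookup do essentially this, with extra
 * checks):
 *
 *	struct reiserfs_de_head *deh = B_I_DEH(bh, ih);
 *	char *name;
 *	int i, len;
 *
 *	for (i = 0; i < I_ENTRY_COUNT(ih); i++) {
 *		if (!de_visible(deh + i))
 *			continue;		// entry was unlinked
 *		name = B_I_E_NAME(bh, ih, i);
 *		len = entry_length(bh, ih, i);	// may include trailing padding
 *		// (deh_dir_id(deh + i), deh_objectid(deh + i)) is the short key
 *		// of the object this entry points to
 *	}
 */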
1758
1759 /* this structure is used for operations on directory entries. It is
1760 not a disk structure. */
1761 /* When reiserfs_find_entry or search_by_entry_key find directory
1762 entry, they return filled reiserfs_dir_entry structure */
1763 struct reiserfs_dir_entry {
1764 struct buffer_head *de_bh;
1765 int de_item_num;
1766 struct item_head *de_ih;
1767 int de_entry_num;
1768 struct reiserfs_de_head *de_deh;
1769 int de_entrylen;
1770 int de_namelen;
1771 char *de_name;
1772 unsigned long *de_gen_number_bit_string;
1773
1774 __u32 de_dir_id;
1775 __u32 de_objectid;
1776
1777 struct cpu_key de_entry_key;
1778 };
1779
1780 /* these defines are useful when a particular member of a reiserfs_dir_entry is needed */
1781
1782 /* pointer to file name, stored in entry */
1783 #define B_I_DEH_ENTRY_FILE_NAME(bh,ih,deh) (B_I_PITEM (bh, ih) + deh_location(deh))
1784
1785 /* length of name */
1786 #define I_DEH_N_ENTRY_FILE_NAME_LENGTH(ih,deh,entry_num) \
1787 (I_DEH_N_ENTRY_LENGTH (ih, deh, entry_num) - (de_with_sd (deh) ? SD_SIZE : 0))
1788
1789 /* hash value occupies bits from 7 up to 30 */
1790 #define GET_HASH_VALUE(offset) ((offset) & 0x7fffff80LL)
1791 /* generation number occupies 7 bits starting from 0 up to 6 */
1792 #define GET_GENERATION_NUMBER(offset) ((offset) & 0x7fLL)
1793 #define MAX_GENERATION_NUMBER 127
1794
1795 #define SET_GENERATION_NUMBER(offset,gen_number) (GET_HASH_VALUE(offset)|(gen_number))
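
/* Example: all names in one directory that hash to the same value H share
   GET_HASH_VALUE(offset) == H and are told apart only by the generation
   number, i.e. their offsets are SET_GENERATION_NUMBER(H, 0),
   SET_GENERATION_NUMBER(H, 1), ... so at most MAX_GENERATION_NUMBER + 1
   colliding names can be stored in one directory. */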
1796
1797 /*
1798 * Picture represents an internal node of the reiserfs tree
1799 * ______________________________________________________
1800 * | | Array of | Array of | Free |
1801 * |block | keys | pointers | space |
1802 * | head | N | N+1 | |
1803 * |______|_______________|___________________|___________|
1804 */
1805
1806 /***************************************************************************/
1807 /* DISK CHILD */
1808 /***************************************************************************/
1809 /* Disk child pointer: The pointer from an internal node of the tree
1810 to a node that is on disk. */
1811 struct disk_child {
1812 __le32 dc_block_number; /* Disk child's block number. */
1813 __le16 dc_size; /* Disk child's used space. */
1814 __le16 dc_reserved;
1815 };
1816
1817 #define DC_SIZE (sizeof(struct disk_child))
1818 #define dc_block_number(dc_p) (le32_to_cpu((dc_p)->dc_block_number))
1819 #define dc_size(dc_p) (le16_to_cpu((dc_p)->dc_size))
1820 #define put_dc_block_number(dc_p, val) do { (dc_p)->dc_block_number = cpu_to_le32(val); } while(0)
1821 #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0)
1822
1823 /* Get disk child by buffer header and position in the tree node. */
1824 #define B_N_CHILD(bh, n_pos) ((struct disk_child *)\
1825 ((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos)))
1826
1827 /* Get disk child number by buffer header and position in the tree node. */
1828 #define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos)))
1829 #define PUT_B_N_CHILD_NUM(bh, n_pos, val) \
1830 (put_dc_block_number(B_N_CHILD(bh, n_pos), val))
1831
1832 /* maximal value of field child_size in structure disk_child */
1833 /* child size is the combined size of all items and their headers */
1834 #define MAX_CHILD_SIZE(bh) ((int)( (bh)->b_size - BLKH_SIZE ))
1835
1836 /* amount of used space in buffer (not including block head) */
1837 #define B_CHILD_SIZE(cur) (MAX_CHILD_SIZE(cur)-(B_FREE_SPACE(cur)))
1838
1839 /* max and min number of keys in internal node */
1840 #define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) )
1841 #define MIN_NR_KEY(bh) (MAX_NR_KEY(bh)/2)
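
/* Worked example for a 4096 byte block, assuming KEY_SIZE is the 16 bytes
   of struct reiserfs_key: MAX_CHILD_SIZE = 4096 - 24 = 4072, so
   MAX_NR_KEY = (4072 - 8) / (16 + 8) = 169 keys (with 170 disk child
   pointers) and MIN_NR_KEY = 84. */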
1842
1843 /***************************************************************************/
1844 /* PATH STRUCTURES AND DEFINES */
1845 /***************************************************************************/
1846
1847 /* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the
1848 key. It uses reiserfs_bread to try to find buffers in the cache given their block number. If it
1849 does not find them in the cache it reads them from disk. For each node search_by_key finds using
1850 reiserfs_bread it then uses bin_search to look through that node. bin_search will find the
1851 position of the block_number of the next node if it is looking through an internal node. If it
1852 is looking through a leaf node bin_search will find the position of the item which has key either
1853 equal to given key, or which is the maximal key less than the given key. */
1854
1855 struct path_element {
1856 struct buffer_head *pe_buffer; /* Pointer to the buffer at the path in the tree. */
1857 int pe_position; /* Position in the tree node which is placed in the */
1858 /* buffer above. */
1859 };
1860
1861 #define MAX_HEIGHT 5 /* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */
1862 #define EXTENDED_MAX_HEIGHT 7 /* Must be equal to MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
1863 #define FIRST_PATH_ELEMENT_OFFSET 2 /* Must be equal to at least 2. */
1864
1865 #define ILLEGAL_PATH_ELEMENT_OFFSET 1 /* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
1866 #define MAX_FEB_SIZE 6 /* this MUST be MAX_HEIGHT + 1. See about FEB below */
1867
1868 /* We need to keep track of who the ancestors of nodes are. When we
1869 perform a search we record which nodes were visited while
1870 descending the tree looking for the node we searched for. This list
1871 of nodes is called the path. This information is used while
1872 performing balancing. Note that this path information may become
1873 invalid, and this means we must check it when using it to see if it
1874 is still valid. You'll need to read search_by_key and the comments
1875 in it, especially about decrement_counters_in_path(), to understand
1876 this structure.
1877
1878 Paths make the code so much harder to work with and debug.... An
1879 enormous number of bugs are due to them, and trying to write or modify
1880 code that uses them just makes my head hurt. They are based on an
1881 excessive effort to avoid disturbing the precious VFS code.:-( The
1882 gods only know how we are going to SMP the code that uses them.
1883 znodes are the way! */
1884
1885 #define PATH_READA 0x1 /* do read ahead */
1886 #define PATH_READA_BACK 0x2 /* read backwards */
1887
1888 struct treepath {
1889 int path_length; /* Length of the path_elements array below. */
1890 int reada;
1891 struct path_element path_elements[EXTENDED_MAX_HEIGHT]; /* Array of the path elements. */
1892 int pos_in_item;
1893 };
1894
1895 #define pos_in_item(path) ((path)->pos_in_item)
1896
1897 #define INITIALIZE_PATH(var) \
1898 struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
1899
1900 /* Get path element by path and path position. */
1901 #define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset))
1902
1903 /* Get buffer header at the path by path and path position. */
1904 #define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer)
1905
1906 /* Get position in the element at the path by path and path position. */
1907 #define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
1908
1909 #define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
1910 /* you know, to the person who didn't
1911 write this the macro name does not
1912 at first suggest what it does.
1913 Maybe POSITION_FROM_PATH_END? Or
1914 maybe we should just focus on
1915 dumping paths... -Hans */
1916 #define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
1917
1918 #define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
1919
1920 /* in do_balance leaf has h == 0 in contrast with path structure,
1921 where root has level == 0. That is why we need these defines */
1922 #define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */
1923 #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
1924 #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
1925 #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
1926
1927 #define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
1928
1929 #define get_last_bh(path) PATH_PLAST_BUFFER(path)
1930 #define get_ih(path) PATH_PITEM_HEAD(path)
1931 #define get_item_pos(path) PATH_LAST_POSITION(path)
1932 #define get_item(path) ((void *)B_N_PITEM(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION (path)))
1933 #define item_moved(ih,path) comp_items(ih, path)
1934 #define path_changed(ih,path) comp_items (ih, path)
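
/*
 * Sketch of the usual lookup pattern built on the path macros above
 * (search_by_key() and pathrelse() are declared later in this header;
 * ITEM_FOUND is the return value callers normally check):
 *
 *	INITIALIZE_PATH(path);
 *
 *	if (search_by_key(sb, &cpu_key, &path, DISK_LEAF_NODE_LEVEL) == ITEM_FOUND) {
 *		struct buffer_head *bh = get_last_bh(&path);
 *		struct item_head *ih = get_ih(&path);
 *		// ... read the item; revalidate with fs_changed()/item_moved()
 *		// if anything could have scheduled in between ...
 *	}
 *	pathrelse(&path);
 */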
1935
1936 /***************************************************************************/
1937 /* MISC */
1938 /***************************************************************************/
1939
1940 /* Size of pointer to the unformatted node. */
1941 #define UNFM_P_SIZE (sizeof(unp_t))
1942 #define UNFM_P_SHIFT 2
1943
1944 // in the in-core inode the key is stored in little-endian (on-disk) form
1945 #define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
1946
1947 #define MAX_UL_INT 0xffffffff
1948 #define MAX_INT 0x7fffffff
1949 #define MAX_US_INT 0xffff
1950
1951 // reiserfs version 2 has a max offset of 60 bits; version 1 has a 32 bit offset
1952 #define U32_MAX (~(__u32)0)
1953
1954 static inline loff_t max_reiserfs_offset(struct inode *inode)
1955 {
1956 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
1957 return (loff_t) U32_MAX;
1958
1959 return (loff_t) ((~(__u64) 0) >> 4);
1960 }
1961
1962 /*#define MAX_KEY_UNIQUENESS MAX_UL_INT*/
1963 #define MAX_KEY_OBJECTID MAX_UL_INT
1964
1965 #define MAX_B_NUM MAX_UL_INT
1966 #define MAX_FC_NUM MAX_US_INT
1967
1968 /* the purpose is to detect overflow of an unsigned short */
1969 #define REISERFS_LINK_MAX (MAX_US_INT - 1000)
1970
1971 /* The following defines are used in reiserfs_insert_item and reiserfs_append_item */
1972 #define REISERFS_KERNEL_MEM 0 /* reiserfs kernel memory mode */
1973 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
1974
1975 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
1976 #define get_generation(s) atomic_read (&fs_generation(s))
1977 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
1978 #define __fs_changed(gen,s) (gen != get_generation (s))
1979 #define fs_changed(gen,s) \
1980 ({ \
1981 reiserfs_cond_resched(s); \
1982 __fs_changed(gen, s); \
1983 })
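
/*
 * Typical revalidation pattern (a sketch; see stree.c and inode.c for real
 * uses): remember the generation before doing anything that may sleep, and
 * restart the search if the tree changed underneath us:
 *
 *	int gen = get_generation(s);
 *
 *	// ... bread / lock / anything that might schedule ...
 *	if (fs_changed(gen, s)) {
 *		// the tree was rebalanced by someone else, so the path and
 *		// item position we computed may be stale: search again
 *	}
 */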
1984
1985 /***************************************************************************/
1986 /* FIXATE NODES */
1987 /***************************************************************************/
1988
1989 #define VI_TYPE_LEFT_MERGEABLE 1
1990 #define VI_TYPE_RIGHT_MERGEABLE 2
1991
1992 /* To make any changes in the tree we always first find node, that
1993 contains item to be changed/deleted or place to insert a new
1994 item. We call this node S. To do balancing we need to decide what
1995 we will shift to left/right neighbor, or to a new node, where new
1996 item will be etc. To make this analysis simpler we build virtual
1997 node. Virtual node is an array of items, that will replace items of
1998 node S. (For instance if we are going to delete an item, virtual
1999 node does not contain it). Virtual node keeps information about
2000 item sizes and types, mergeability of first and last items, sizes
2001 of all entries in directory item. We use this array of items when
2002 calculating what we can shift to neighbors and how many nodes we
2003 have to have if we do not do any shifting, if we shift to the left/right
2004 neighbor, or to both. */
2005 struct virtual_item {
2006 int vi_index; // index in the array of item operations
2007 unsigned short vi_type; // left/right mergeability
2008 unsigned short vi_item_len; /* length of item that it will have after balancing */
2009 struct item_head *vi_ih;
2010 const char *vi_item; // body of item (old or new)
2011 const void *vi_new_data; // always 0, except in paste mode
2012 void *vi_uarea; // item specific area
2013 };
2014
2015 struct virtual_node {
2016 char *vn_free_ptr; /* this is a pointer to the free space in the buffer */
2017 unsigned short vn_nr_item; /* number of items in virtual node */
2018 short vn_size; /* size the node would have if it had unlimited size and no balancing were performed */
2019 short vn_mode; /* mode of balancing (paste, insert, delete, cut) */
2020 short vn_affected_item_num;
2021 short vn_pos_in_item;
2022 struct item_head *vn_ins_ih; /* item header of inserted item, 0 for other modes */
2023 const void *vn_data;
2024 struct virtual_item *vn_vi; /* array of items (including a new one, excluding item to be deleted) */
2025 };
2026
2027 /* used by directory items when creating virtual nodes */
2028 struct direntry_uarea {
2029 int flags;
2030 __u16 entry_count;
2031 __u16 entry_sizes[1];
2032 } __attribute__ ((__packed__));
2033
2034 /***************************************************************************/
2035 /* TREE BALANCE */
2036 /***************************************************************************/
2037
2038 /* This temporary structure is used in tree balance algorithms, and
2039 constructed as we go to the extent that its various parts are
2040 needed. It contains arrays of nodes that can potentially be
2041 involved in the balancing of node S, and parameters that define how
2042 each of the nodes must be balanced. Note that in these algorithms
2043 for balancing the worst case is to need to balance the current node
2044 S and the left and right neighbors and all of their parents plus
2045 create a new node. We implement S1 balancing for the leaf nodes
2046 and S0 balancing for the internal nodes (S1 and S0 are defined in
2047 our papers.)*/
2048
2049 #define MAX_FREE_BLOCK 7 /* size of the array of buffers to free at end of do_balance */
2050
2051 /* maximum number of FEB blocknrs on a single level */
2052 #define MAX_AMOUNT_NEEDED 2
2053
2054 /* someday somebody will prefix every field in this struct with tb_ */
2055 struct tree_balance {
2056 int tb_mode;
2057 int need_balance_dirty;
2058 struct super_block *tb_sb;
2059 struct reiserfs_transaction_handle *transaction_handle;
2060 struct treepath *tb_path;
2061 struct buffer_head *L[MAX_HEIGHT]; /* array of left neighbors of nodes in the path */
2062 struct buffer_head *R[MAX_HEIGHT]; /* array of right neighbors of nodes in the path */
2063 struct buffer_head *FL[MAX_HEIGHT]; /* array of fathers of the left neighbors */
2064 struct buffer_head *FR[MAX_HEIGHT]; /* array of fathers of the right neighbors */
2065 struct buffer_head *CFL[MAX_HEIGHT]; /* array of common parents of center node and its left neighbor */
2066 struct buffer_head *CFR[MAX_HEIGHT]; /* array of common parents of center node and its right neighbor */
2067
2068 struct buffer_head *FEB[MAX_FEB_SIZE]; /* array of empty buffers. Number of buffers in array equals
2069 cur_blknum. */
2070 struct buffer_head *used[MAX_FEB_SIZE];
2071 struct buffer_head *thrown[MAX_FEB_SIZE];
2072 int lnum[MAX_HEIGHT]; /* array of number of items which must be
2073 shifted to the left in order to balance the
2074 current node; for leaves includes item that
2075 will be partially shifted; for internal
2076 nodes, it is the number of child pointers
2077 rather than items. It includes the new item
2078 being created. The code sometimes subtracts
2079 one to get the number of wholly shifted
2080 items for other purposes. */
2081 int rnum[MAX_HEIGHT]; /* substitute right for left in comment above */
2082 int lkey[MAX_HEIGHT]; /* array indexed by height h mapping the key delimiting L[h] and
2083 S[h] to its item number within the node CFL[h] */
2084 int rkey[MAX_HEIGHT]; /* substitute r for l in comment above */
2085 int insert_size[MAX_HEIGHT]; /* the number of bytes by we are trying to add or remove from
2086 S[h]. A negative value means removing. */
2087 int blknum[MAX_HEIGHT]; /* number of nodes that will replace node S[h] after
2088 balancing on the level h of the tree. If 0 then S is
2089 being deleted, if 1 then S is remaining and no new nodes
2090 are being created, if 2 or 3 then 1 or 2 new nodes is
2091 being created */
2092
2093 /* fields that are used only for balancing leaves of the tree */
2094 int cur_blknum; /* number of empty blocks having been already allocated */
2095 int s0num; /* number of items that fall into left most node when S[0] splits */
2096 int s1num; /* number of items that fall into first new node when S[0] splits */
2097 int s2num; /* number of items that fall into second new node when S[0] splits */
2098 int lbytes; /* number of bytes which can flow to the left neighbor from the left */
2099 /* most liquid item that cannot be shifted from S[0] entirely */
2100 /* if -1 then nothing will be partially shifted */
2101 int rbytes; /* number of bytes which will flow to the right neighbor from the right */
2102 /* most liquid item that cannot be shifted from S[0] entirely */
2103 /* if -1 then nothing will be partially shifted */
2104 int s1bytes; /* number of bytes which flow to the first new node when S[0] splits */
2105 /* note: if S[0] splits into 3 nodes, then items do not need to be cut */
2106 int s2bytes;
2107 struct buffer_head *buf_to_free[MAX_FREE_BLOCK]; /* buffers which are to be freed after do_balance finishes by unfix_nodes */
2108 char *vn_buf; /* kmalloced memory. Used to create
2109 virtual node and keep map of
2110 dirtied bitmap blocks */
2111 int vn_buf_size; /* size of the vn_buf */
2112 struct virtual_node *tb_vn; /* VN starts after bitmap of bitmap blocks */
2113
2114 int fs_gen; /* saved value of `reiserfs_generation' counter
2115 see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
2116 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2117 struct in_core_key key; /* key pointer, to pass to block allocator or
2118 another low-level subsystem */
2119 #endif
2120 };
2121
2122 /* These are modes of balancing */
2123
2124 /* When inserting an item. */
2125 #define M_INSERT 'i'
2126 /* When inserting into (directories only) or appending onto an already
2127 existent item. */
2128 #define M_PASTE 'p'
2129 /* When deleting an item. */
2130 #define M_DELETE 'd'
2131 /* When truncating an item or removing an entry from a (directory) item. */
2132 #define M_CUT 'c'
2133
2134 /* used when balancing on leaf level skipped (in reiserfsck) */
2135 #define M_INTERNAL 'n'
2136
2137 /* When further balancing is not needed, then do_balance does not need
2138 to be called. */
2139 #define M_SKIP_BALANCING 's'
2140 #define M_CONVERT 'v'
2141
2142 /* modes of leaf_move_items */
2143 #define LEAF_FROM_S_TO_L 0
2144 #define LEAF_FROM_S_TO_R 1
2145 #define LEAF_FROM_R_TO_L 2
2146 #define LEAF_FROM_L_TO_R 3
2147 #define LEAF_FROM_S_TO_SNEW 4
2148
2149 #define FIRST_TO_LAST 0
2150 #define LAST_TO_FIRST 1
2151
2152 /* used in do_balance for passing parent of node information that has
2153 been gotten from tb struct */
2154 struct buffer_info {
2155 struct tree_balance *tb;
2156 struct buffer_head *bi_bh;
2157 struct buffer_head *bi_parent;
2158 int bi_position;
2159 };
2160
2161 static inline struct super_block *sb_from_tb(struct tree_balance *tb)
2162 {
2163 return tb ? tb->tb_sb : NULL;
2164 }
2165
2166 static inline struct super_block *sb_from_bi(struct buffer_info *bi)
2167 {
2168 return bi ? sb_from_tb(bi->tb) : NULL;
2169 }
2170
2171 /* there are 4 types of items: stat data, directory item, indirect, direct.
2172 +-------------------+------------+--------------+------------+
2173 | | k_offset | k_uniqueness | mergeable? |
2174 +-------------------+------------+--------------+------------+
2175 | stat data | 0 | 0 | no |
2176 +-------------------+------------+--------------+------------+
2177 | 1st directory item| DOT_OFFSET |DIRENTRY_UNIQUENESS| no |
2178 | non 1st directory | hash value | | yes |
2179 | item | | | |
2180 +-------------------+------------+--------------+------------+
2181 | indirect item | offset + 1 |TYPE_INDIRECT | if this is not the first indirect item of the object
2182 +-------------------+------------+--------------+------------+
2183 | direct item | offset + 1 |TYPE_DIRECT | if this is not the first direct item of the object
2184 +-------------------+------------+--------------+------------+
2185 */
2186
2187 struct item_operations {
2188 int (*bytes_number) (struct item_head * ih, int block_size);
2189 void (*decrement_key) (struct cpu_key *);
2190 int (*is_left_mergeable) (struct reiserfs_key * ih,
2191 unsigned long bsize);
2192 void (*print_item) (struct item_head *, char *item);
2193 void (*check_item) (struct item_head *, char *item);
2194
2195 int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
2196 int is_affected, int insert_size);
2197 int (*check_left) (struct virtual_item * vi, int free,
2198 int start_skip, int end_skip);
2199 int (*check_right) (struct virtual_item * vi, int free);
2200 int (*part_size) (struct virtual_item * vi, int from, int to);
2201 int (*unit_num) (struct virtual_item * vi);
2202 void (*print_vi) (struct virtual_item * vi);
2203 };
2204
2205 extern struct item_operations *item_ops[TYPE_ANY + 1];
2206
2207 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
2208 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
2209 #define op_print_item(ih,item) item_ops[le_ih_k_type (ih)]->print_item (ih, item)
2210 #define op_check_item(ih,item) item_ops[le_ih_k_type (ih)]->check_item (ih, item)
2211 #define op_create_vi(vn,vi,is_affected,insert_size) item_ops[le_ih_k_type ((vi)->vi_ih)]->create_vi (vn,vi,is_affected,insert_size)
2212 #define op_check_left(vi,free,start_skip,end_skip) item_ops[(vi)->vi_index]->check_left (vi, free, start_skip, end_skip)
2213 #define op_check_right(vi,free) item_ops[(vi)->vi_index]->check_right (vi, free)
2214 #define op_part_size(vi,from,to) item_ops[(vi)->vi_index]->part_size (vi, from, to)
2215 #define op_unit_num(vi) item_ops[(vi)->vi_index]->unit_num (vi)
2216 #define op_print_vi(vi) item_ops[(vi)->vi_index]->print_vi (vi)
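
/*
 * Example of the dispatch these macros perform (a sketch): for an item
 * header 'ih' taken from a leaf, op_bytes_number() looks up the operations
 * vector by item type and asks it how many file bytes the item covers:
 *
 *	int bytes = op_bytes_number(ih, sb->s_blocksize);
 *
 * which expands to item_ops[le_ih_k_type(ih)]->bytes_number(ih, sb->s_blocksize).
 */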
2217
2218 #define COMP_SHORT_KEYS comp_short_keys
2219
2220 /* number of blocks pointed to by the indirect item */
2221 #define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE)
2222
2223 /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
2224 #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
2225
2226 /* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */
2227
2228 /* get the item header */
2229 #define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
2230
2231 /* get key */
2232 #define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
2233
2234 /* get the key */
2235 #define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
2236
2237 /* get item body */
2238 #define B_N_PITEM(bh,item_num) ( (bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(item_num))))
2239
2240 /* get the stat data by the buffer header and the item order */
2241 #define B_N_STAT_DATA(bh,nr) \
2242 ( (struct stat_data *)((bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(nr))) ) )
2243
2244 /* following defines use reiserfs buffer header and item header */
2245
2246 /* get stat-data */
2247 #define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + ih_location(ih)) )
2248
2249 // this is 3976 for size==4096
2250 #define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE)
2251
2252 /* indirect items consist of entries which contain blocknrs, pos
2253 indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
2254 blocknr contained by the entry pos points to */
2255 #define B_I_POS_UNFM_POINTER(bh,ih,pos) le32_to_cpu(*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)))
2256 #define PUT_B_I_POS_UNFM_POINTER(bh,ih,pos, val) do {*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)) = cpu_to_le32(val); } while (0)
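
/*
 * Illustrative loop over the block pointers of one indirect item (this is
 * roughly what the read and truncate paths do):
 *
 *	int i;
 *
 *	for (i = 0; i < I_UNFM_NUM(ih); i++) {
 *		b_blocknr_t blk = B_I_POS_UNFM_POINTER(bh, ih, i);
 *		if (blk == 0)
 *			continue;	// a hole: no unformatted node allocated
 *		// ... map or read block 'blk' ...
 *	}
 */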
2257
2258 struct reiserfs_iget_args {
2259 __u32 objectid;
2260 __u32 dirid;
2261 };
2262
2263 /***************************************************************************/
2264 /* FUNCTION DECLARATIONS */
2265 /***************************************************************************/
2266
2267 #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
2268
2269 #define journal_trans_half(blocksize) \
2270 ((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32))
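
/* For example, with a 4096 byte block and the 16 byte struct
   reiserfs_journal_desc defined just below, journal_trans_half(4096) is
   (4096 - 16 + 4 - 12) / 4 = 1018 block numbers per desc/commit block. */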
2271
2272 /* journal.c see journal.c for all the comments here */
2273
2274 /* first block written in a commit. */
2275 struct reiserfs_journal_desc {
2276 __le32 j_trans_id; /* id of commit */
2277 __le32 j_len; /* length of commit. len +1 is the commit block */
2278 __le32 j_mount_id; /* mount id of this trans */
2279 __le32 j_realblock[1]; /* real locations for each block */
2280 };
2281
2282 #define get_desc_trans_id(d) le32_to_cpu((d)->j_trans_id)
2283 #define get_desc_trans_len(d) le32_to_cpu((d)->j_len)
2284 #define get_desc_mount_id(d) le32_to_cpu((d)->j_mount_id)
2285
2286 #define set_desc_trans_id(d,val) do { (d)->j_trans_id = cpu_to_le32 (val); } while (0)
2287 #define set_desc_trans_len(d,val) do { (d)->j_len = cpu_to_le32 (val); } while (0)
2288 #define set_desc_mount_id(d,val) do { (d)->j_mount_id = cpu_to_le32 (val); } while (0)
2289
2290 /* last block written in a commit */
2291 struct reiserfs_journal_commit {
2292 __le32 j_trans_id; /* must match j_trans_id from the desc block */
2293 __le32 j_len; /* ditto */
2294 __le32 j_realblock[1]; /* real locations for each block */
2295 };
2296
2297 #define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id)
2298 #define get_commit_trans_len(c) le32_to_cpu((c)->j_len)
2299 #define get_commit_mount_id(c) le32_to_cpu((c)->j_mount_id)
2300
2301 #define set_commit_trans_id(c,val) do { (c)->j_trans_id = cpu_to_le32 (val); } while (0)
2302 #define set_commit_trans_len(c,val) do { (c)->j_len = cpu_to_le32 (val); } while (0)
2303
2304 /* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
2305 ** last fully flushed transaction. fully flushed means all the log blocks and all the real blocks are on disk,
2306 ** and this transaction does not need to be replayed.
2307 */
2308 struct reiserfs_journal_header {
2309 __le32 j_last_flush_trans_id; /* id of last fully flushed transaction */
2310 __le32 j_first_unflushed_offset; /* offset in the log of where to start replay after a crash */
2311 __le32 j_mount_id;
2312 /* 12 */ struct journal_params jh_journal;
2313 };
2314
2315 /* biggest tunable defines are right here */
2316 #define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */
2317 #define JOURNAL_TRANS_MAX_DEFAULT 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
2318 #define JOURNAL_TRANS_MIN_DEFAULT 256
2319 #define JOURNAL_MAX_BATCH_DEFAULT 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
2320 #define JOURNAL_MIN_RATIO 2
2321 #define JOURNAL_MAX_COMMIT_AGE 30
2322 #define JOURNAL_MAX_TRANS_AGE 30
2323 #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
2324 #define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \
2325 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \
2326 REISERFS_QUOTA_TRANS_BLOCKS(sb)))
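
/* Worked example with MAX_HEIGHT == 5: JOURNAL_PER_BALANCE_CNT is
   3 * (5 - 2) + 9 = 18, so without quotas JOURNAL_BLOCKS_PER_OBJECT(sb)
   reserves 18 * 3 = 54 journal blocks per object. */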
2327
2328 #ifdef CONFIG_QUOTA
2329 #define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA))
2330 /* We need to update data and inode (atime) */
2331 #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0)
2332 /* 1 balancing, 1 bitmap, 1 data per write + stat data update */
2333 #define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
2334 (DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0)
2335 /* same as with INIT */
2336 #define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
2337 (DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0)
2338 #else
2339 #define REISERFS_QUOTA_TRANS_BLOCKS(s) 0
2340 #define REISERFS_QUOTA_INIT_BLOCKS(s) 0
2341 #define REISERFS_QUOTA_DEL_BLOCKS(s) 0
2342 #endif
2343
2344 /* both of these can be as low as 1, or as high as you want. The min is the
2345 ** number of 4k bitmap nodes preallocated on mount. New nodes are allocated
2346 ** as needed, and released when transactions are committed. On release, if
2347 ** the current number of nodes is > max, the node is freed, otherwise,
2348 ** it is put on a free list for faster use later.
2349 */
2350 #define REISERFS_MIN_BITMAP_NODES 10
2351 #define REISERFS_MAX_BITMAP_NODES 100
2352
2353 #define JBH_HASH_SHIFT 13 /* these are based on journal hash size of 8192 */
2354 #define JBH_HASH_MASK 8191
2355
2356 #define _jhashfn(sb,block) \
2357 (((unsigned long)sb>>L1_CACHE_SHIFT) ^ \
2358 (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
2359 #define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
2360
2361 // We need these to make journal.c code more readable
2362 #define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2363 #define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2364 #define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2365
2366 enum reiserfs_bh_state_bits {
2367 BH_JDirty = BH_PrivateStart, /* buffer is in current transaction */
2368 BH_JDirty_wait,
2369 BH_JNew, /* disk block was taken off free list before
2370 * being in a finished transaction, or
2371 * written to disk. Can be reused immed. */
2372 BH_JPrepared,
2373 BH_JRestore_dirty,
2374 BH_JTest, // debugging only, will go away
2375 };
2376
2377 BUFFER_FNS(JDirty, journaled);
2378 TAS_BUFFER_FNS(JDirty, journaled);
2379 BUFFER_FNS(JDirty_wait, journal_dirty);
2380 TAS_BUFFER_FNS(JDirty_wait, journal_dirty);
2381 BUFFER_FNS(JNew, journal_new);
2382 TAS_BUFFER_FNS(JNew, journal_new);
2383 BUFFER_FNS(JPrepared, journal_prepared);
2384 TAS_BUFFER_FNS(JPrepared, journal_prepared);
2385 BUFFER_FNS(JRestore_dirty, journal_restore_dirty);
2386 TAS_BUFFER_FNS(JRestore_dirty, journal_restore_dirty);
2387 BUFFER_FNS(JTest, journal_test);
2388 TAS_BUFFER_FNS(JTest, journal_test);
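
/* The BUFFER_FNS()/TAS_BUFFER_FNS() macros from <linux/buffer_head.h>
   generate the accessors used throughout journal.c; e.g. the JDirty pair
   above provides buffer_journaled(bh), set_buffer_journaled(bh) and
   clear_buffer_journaled(bh), plus test_set_buffer_journaled(bh) and
   test_clear_buffer_journaled(bh). */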
2389
2390 /*
2391 ** transaction handle which is passed around for all journal calls
2392 */
2393 struct reiserfs_transaction_handle {
2394 struct super_block *t_super; /* super for this FS when journal_begin was
2395 called. saves calls to reiserfs_get_super
2396 also used by nested transactions to make
2397 sure they are nesting on the right FS
2398 _must_ be first in the handle
2399 */
2400 int t_refcount;
2401 int t_blocks_logged; /* number of blocks this writer has logged */
2402 int t_blocks_allocated; /* number of blocks this writer allocated */
2403 unsigned int t_trans_id; /* sanity check, equals the current trans id */
2404 void *t_handle_save; /* save existing current->journal_info */
2405 unsigned displace_new_blocks:1; /* if new block allocation occurs, that block
2406 should be displaced from others */
2407 struct list_head t_list;
2408 };
2409
2410 /* used to keep track of ordered and tail writes, attached to the buffer
2411 * head through b_journal_head.
2412 */
2413 struct reiserfs_jh {
2414 struct reiserfs_journal_list *jl;
2415 struct buffer_head *bh;
2416 struct list_head list;
2417 };
2418
2419 void reiserfs_free_jh(struct buffer_head *bh);
2420 int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh);
2421 int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh);
2422 int journal_mark_dirty(struct reiserfs_transaction_handle *,
2423 struct super_block *, struct buffer_head *bh);
2424
2425 static inline int reiserfs_file_data_log(struct inode *inode)
2426 {
2427 if (reiserfs_data_log(inode->i_sb) ||
2428 (REISERFS_I(inode)->i_flags & i_data_log))
2429 return 1;
2430 return 0;
2431 }
2432
2433 static inline int reiserfs_transaction_running(struct super_block *s)
2434 {
2435 struct reiserfs_transaction_handle *th = current->journal_info;
2436 if (th && th->t_super == s)
2437 return 1;
2438 if (th && th->t_super == NULL)
2439 BUG();
2440 return 0;
2441 }
2442
2443 static inline int reiserfs_transaction_free_space(struct reiserfs_transaction_handle *th)
2444 {
2445 return th->t_blocks_allocated - th->t_blocks_logged;
2446 }
2447
2448 struct reiserfs_transaction_handle *
2449 reiserfs_persistent_transaction(struct super_block *, int count);
2452 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
2453 int reiserfs_commit_page(struct inode *inode, struct page *page,
2454 unsigned from, unsigned to);
2455 int reiserfs_flush_old_commits(struct super_block *);
2456 int reiserfs_commit_for_inode(struct inode *);
2457 int reiserfs_inode_needs_commit(struct inode *);
2458 void reiserfs_update_inode_transaction(struct inode *);
2459 void reiserfs_wait_on_write_block(struct super_block *s);
2460 void reiserfs_block_writes(struct reiserfs_transaction_handle *th);
2461 void reiserfs_allow_writes(struct super_block *s);
2462 void reiserfs_check_lock_depth(struct super_block *s, char *caller);
2463 int reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh,
2464 int wait);
2465 void reiserfs_restore_prepared_buffer(struct super_block *,
2466 struct buffer_head *bh);
2467 int journal_init(struct super_block *, const char *j_dev_name, int old_format,
2468 unsigned int);
2469 int journal_release(struct reiserfs_transaction_handle *, struct super_block *);
2470 int journal_release_error(struct reiserfs_transaction_handle *,
2471 struct super_block *);
2472 int journal_end(struct reiserfs_transaction_handle *, struct super_block *,
2473 unsigned long);
2474 int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *,
2475 unsigned long);
2476 int journal_mark_freed(struct reiserfs_transaction_handle *,
2477 struct super_block *, b_blocknr_t blocknr);
2478 int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
2479 int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
2480 int bit_nr, int searchall, b_blocknr_t *next);
2481 int journal_begin(struct reiserfs_transaction_handle *,
2482 struct super_block *sb, unsigned long);
2483 int journal_join_abort(struct reiserfs_transaction_handle *,
2484 struct super_block *sb, unsigned long);
2485 void reiserfs_abort_journal(struct super_block *sb, int errno);
2486 void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
2487 int reiserfs_allocate_list_bitmaps(struct super_block *s,
2488 struct reiserfs_list_bitmap *, unsigned int);
2489
2490 void add_save_link(struct reiserfs_transaction_handle *th,
2491 struct inode *inode, int truncate);
2492 int remove_save_link(struct inode *inode, int truncate);
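
/*
 * A rough sketch of the usual transaction lifecycle around the calls
 * declared above (error handling omitted; the block count is only an
 * example):
 *
 *	struct reiserfs_transaction_handle th;
 *
 *	journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	// ... modify bh->b_data ...
 *	journal_mark_dirty(&th, sb, bh);
 *	journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);
 */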
2493
2494 /* objectid.c */
2495 __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th);
2496 void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
2497 __u32 objectid_to_release);
2498 int reiserfs_convert_objectid_map_v1(struct super_block *);
2499
2500 /* stree.c */
2501 int B_IS_IN_TREE(const struct buffer_head *);
2502 extern void copy_item_head(struct item_head *to,
2503 const struct item_head *from);
2504
2505 // the first key is in cpu form, the second in little-endian (on-disk) form
2506 extern int comp_short_keys(const struct reiserfs_key *le_key,
2507 const struct cpu_key *cpu_key);
2508 extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from);
2509
2510 // both are in le form
2511 extern int comp_le_keys(const struct reiserfs_key *,
2512 const struct reiserfs_key *);
2513 extern int comp_short_le_keys(const struct reiserfs_key *,
2514 const struct reiserfs_key *);
2515
2516 //
2517 // get key version from on disk key - kludge
2518 //
2519 static inline int le_key_version(const struct reiserfs_key *key)
2520 {
2521 int type;
2522
2523 type = offset_v2_k_type(&(key->u.k_offset_v2));
2524 if (type != TYPE_DIRECT && type != TYPE_INDIRECT
2525 && type != TYPE_DIRENTRY)
2526 return KEY_FORMAT_3_5;
2527
2528 return KEY_FORMAT_3_6;
2529
2530 }
2531
2532 static inline void copy_key(struct reiserfs_key *to,
2533 const struct reiserfs_key *from)
2534 {
2535 memcpy(to, from, KEY_SIZE);
2536 }
2537
2538 int comp_items(const struct item_head *stored_ih, const struct treepath *path);
2539 const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
2540 const struct super_block *sb);
2541 int search_by_key(struct super_block *, const struct cpu_key *,
2542 struct treepath *, int);
2543 #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
2544 int search_for_position_by_key(struct super_block *sb,
2545 const struct cpu_key *cpu_key,
2546 struct treepath *search_path);
2547 extern void decrement_bcount(struct buffer_head *bh);
2548 void decrement_counters_in_path(struct treepath *search_path);
2549 void pathrelse(struct treepath *search_path);
2550 int reiserfs_check_path(struct treepath *p);
2551 void pathrelse_and_restore(struct super_block *s, struct treepath *search_path);
2552
2553 int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
2554 struct treepath *path,
2555 const struct cpu_key *key,
2556 struct item_head *ih,
2557 struct inode *inode, const char *body);
2558
2559 int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
2560 struct treepath *path,
2561 const struct cpu_key *key,
2562 struct inode *inode,
2563 const char *body, int paste_size);
2564
2565 int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
2566 struct treepath *path,
2567 struct cpu_key *key,
2568 struct inode *inode,
2569 struct page *page, loff_t new_file_size);
2570
2571 int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
2572 struct treepath *path,
2573 const struct cpu_key *key,
2574 struct inode *inode, struct buffer_head *un_bh);
2575
2576 void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
2577 struct inode *inode, struct reiserfs_key *key);
2578 int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
2579 struct inode *inode);
2580 int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
2581 struct inode *inode, struct page *,
2582 int update_timestamps);
2583
2584 #define i_block_size(inode) ((inode)->i_sb->s_blocksize)
2585 #define file_size(inode) ((inode)->i_size)
2586 #define tail_size(inode) (file_size (inode) & (i_block_size (inode) - 1))
2587
2588 #define tail_has_to_be_packed(inode) (have_large_tails((inode)->i_sb) ? \
2589 	!STORE_TAIL_IN_UNFM_S1(file_size(inode), tail_size(inode), (inode)->i_sb->s_blocksize) : \
	have_small_tails((inode)->i_sb) ? \
	!STORE_TAIL_IN_UNFM_S2(file_size(inode), tail_size(inode), (inode)->i_sb->s_blocksize) : 0)
2590
2591 void padd_item(char *item, int total_length, int length);
2592
2593 /* inode.c */
2594 /* args for the create parameter of reiserfs_get_block */
2595 #define GET_BLOCK_NO_CREATE 0 /* don't create new blocks or convert tails */
2596 #define GET_BLOCK_CREATE 1 /* add anything you need to find block */
2597 #define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
2598 #define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
2599 #define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
2600 #define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */
2601
2602 void reiserfs_read_locked_inode(struct inode *inode,
2603 struct reiserfs_iget_args *args);
2604 int reiserfs_find_actor(struct inode *inode, void *p);
2605 int reiserfs_init_locked_inode(struct inode *inode, void *p);
2606 void reiserfs_evict_inode(struct inode *inode);
2607 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2608 int reiserfs_get_block(struct inode *inode, sector_t block,
2609 struct buffer_head *bh_result, int create);
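
/*
 * Illustrative sketch only -- example_map_block_for_read() is hypothetical.
 * The GET_BLOCK_* values above are combined into the "create" argument of
 * reiserfs_get_block(), e.g. a read-side mapping that must not allocate and
 * wants holes reported as -ENOENT:
 */
static inline int example_map_block_for_read(struct inode *inode,
					     sector_t block,
					     struct buffer_head *bh_result)
{
	return reiserfs_get_block(inode, block, bh_result,
				  GET_BLOCK_NO_CREATE | GET_BLOCK_NO_HOLE);
}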
2610 struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2611 int fh_len, int fh_type);
2612 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
2613 int fh_len, int fh_type);
2614 int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
2615 int connectable);
2616
2617 int reiserfs_truncate_file(struct inode *, int update_timestamps);
2618 void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
2619 int type, int key_length);
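
/*
 * Illustrative sketch only -- example_make_body_key() is hypothetical, and
 * TYPE_DIRECT with a key length of 3 are just example arguments.  It builds
 * a cpu_key for byte offset 1 of a file body before searching for it:
 */
static inline void example_make_body_key(struct cpu_key *key,
					 struct inode *inode)
{
	make_cpu_key(key, inode, 1, TYPE_DIRECT, 3);
}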
2620 void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
2621 int version,
2622 loff_t offset, int type, int length, int entry_count);
2623 struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
2624
2625 struct reiserfs_security_handle;
2626 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
2627 struct inode *dir, umode_t mode,
2628 const char *symname, loff_t i_size,
2629 struct dentry *dentry, struct inode *inode,
2630 struct reiserfs_security_handle *security);
2631
2632 void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
2633 struct inode *inode, loff_t size);
2634
2635 static inline void reiserfs_update_sd(struct reiserfs_transaction_handle *th,
2636 struct inode *inode)
2637 {
2638 reiserfs_update_sd_size(th, inode, inode->i_size);
2639 }
2640
2641 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
2642 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
2643 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
2644
2645 int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
2646
2647 /* namei.c */
2648 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
2649 int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
2650 struct treepath *path, struct reiserfs_dir_entry *de);
2651 struct dentry *reiserfs_get_parent(struct dentry *);
2652
2653 #ifdef CONFIG_REISERFS_PROC_INFO
2654 int reiserfs_proc_info_init(struct super_block *sb);
2655 int reiserfs_proc_info_done(struct super_block *sb);
2656 int reiserfs_proc_info_global_init(void);
2657 int reiserfs_proc_info_global_done(void);
2658
2659 #define PROC_EXP( e ) e
2660
2661 #define __PINFO( sb ) REISERFS_SB(sb) -> s_proc_info_data
2662 #define PROC_INFO_MAX( sb, field, value ) \
2663 __PINFO( sb ).field = \
2664 max( REISERFS_SB( sb ) -> s_proc_info_data.field, value )
2665 #define PROC_INFO_INC( sb, field ) ( ++ ( __PINFO( sb ).field ) )
2666 #define PROC_INFO_ADD( sb, field, val ) ( __PINFO( sb ).field += ( val ) )
2667 #define PROC_INFO_BH_STAT( sb, bh, level ) \
2668 PROC_INFO_INC( sb, sbk_read_at[ ( level ) ] ); \
2669 PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
2670 PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
2671 #else
2672 static inline int reiserfs_proc_info_init(struct super_block *sb)
2673 {
2674 return 0;
2675 }
2676
2677 static inline int reiserfs_proc_info_done(struct super_block *sb)
2678 {
2679 return 0;
2680 }
2681
2682 static inline int reiserfs_proc_info_global_init(void)
2683 {
2684 return 0;
2685 }
2686
2687 static inline int reiserfs_proc_info_global_done(void)
2688 {
2689 return 0;
2690 }
2691
2692 #define PROC_EXP( e )
2693 #define VOID_V ( ( void ) 0 )
2694 #define PROC_INFO_MAX( sb, field, value ) VOID_V
2695 #define PROC_INFO_INC( sb, field ) VOID_V
2696 #define PROC_INFO_ADD( sb, field, val ) VOID_V
2697 #define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V
2698 #endif
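
/*
 * Illustrative sketch only -- example_account_tree_read() is hypothetical and
 * the "search_by_key" counter name is assumed to exist in s_proc_info_data.
 * The PROC_INFO_* macros compile away when CONFIG_REISERFS_PROC_INFO is not
 * set, so callers may use them unconditionally, e.g. to account one tree-node
 * read at a given level:
 */
static inline void example_account_tree_read(struct super_block *sb,
					     struct buffer_head *bh,
					     int level)
{
	PROC_INFO_INC(sb, search_by_key);
	PROC_INFO_BH_STAT(sb, bh, level);
}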
2699
2700 /* dir.c */
2701 extern const struct inode_operations reiserfs_dir_inode_operations;
2702 extern const struct inode_operations reiserfs_symlink_inode_operations;
2703 extern const struct inode_operations reiserfs_special_inode_operations;
2704 extern const struct file_operations reiserfs_dir_operations;
2705 int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
2706
2707 /* tail_conversion.c */
2708 int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
2709 struct treepath *, struct buffer_head *, loff_t);
2710 int indirect2direct(struct reiserfs_transaction_handle *, struct inode *,
2711 struct page *, struct treepath *, const struct cpu_key *,
2712 loff_t, char *);
2713 void reiserfs_unmap_buffer(struct buffer_head *);
2714
2715 /* file.c */
2716 extern const struct inode_operations reiserfs_file_inode_operations;
2717 extern const struct file_operations reiserfs_file_operations;
2718 extern const struct address_space_operations reiserfs_address_space_operations;
2719
2720 /* fix_nodes.c */
2721
2722 int fix_nodes(int n_op_mode, struct tree_balance *tb,
2723 struct item_head *ins_ih, const void *);
2724 void unfix_nodes(struct tree_balance *);
2725
2726 /* prints.c */
2727 void __reiserfs_panic(struct super_block *s, const char *id,
2728 const char *function, const char *fmt, ...)
2729 __attribute__ ((noreturn));
2730 #define reiserfs_panic(s, id, fmt, args...) \
2731 __reiserfs_panic(s, id, __func__, fmt, ##args)
2732 void __reiserfs_error(struct super_block *s, const char *id,
2733 const char *function, const char *fmt, ...);
2734 #define reiserfs_error(s, id, fmt, args...) \
2735 __reiserfs_error(s, id, __func__, fmt, ##args)
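
/*
 * Illustrative sketch only -- example_report_bad_block() is a hypothetical
 * call site and "vs-99999" is a made-up message id.  reiserfs_error() and
 * reiserfs_panic() take a short message id followed by a printf-style format:
 */
static inline void example_report_bad_block(struct super_block *sb,
					    unsigned long block)
{
	reiserfs_error(sb, "vs-99999", "block %lu looks corrupted", block);
}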
2736 void reiserfs_info(struct super_block *s, const char *fmt, ...);
2737 void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
2738 void print_indirect_item(struct buffer_head *bh, int item_num);
2739 void store_print_tb(struct tree_balance *tb);
2740 void print_cur_tb(char *mes);
2741 void print_de(struct reiserfs_dir_entry *de);
2742 void print_bi(struct buffer_info *bi, char *mes);
2743 #define PRINT_LEAF_ITEMS 1 /* print all items */
2744 #define PRINT_DIRECTORY_ITEMS 2 /* print directory items */
2745 #define PRINT_DIRECT_ITEMS 4 /* print contents of direct items */
2746 void print_block(struct buffer_head *bh, ...);
2747 void print_bmap(struct super_block *s, int silent);
2748 void print_bmap_block(int i, char *data, int size, int silent);
2749 /*void print_super_block (struct super_block * s, char * mes);*/
2750 void print_objectid_map(struct super_block *s);
2751 void print_block_head(struct buffer_head *bh, char *mes);
2752 void check_leaf(struct buffer_head *bh);
2753 void check_internal(struct buffer_head *bh);
2754 void print_statistics(struct super_block *s);
2755 char *reiserfs_hashname(int code);
2756
2757 /* lbalance.c */
2758 int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
2759 int mov_bytes, struct buffer_head *Snew);
2760 int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes);
2761 int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
2762 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
2763 int del_num, int del_bytes);
2764 void leaf_insert_into_buf(struct buffer_info *bi, int before,
2765 struct item_head *inserted_item_ih,
2766 const char *inserted_item_body, int zeros_number);
2767 void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
2768 int pos_in_item, int paste_size, const char *body,
2769 int zeros_number);
2770 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
2771 int pos_in_item, int cut_size);
2772 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
2773 int new_entry_count, struct reiserfs_de_head *new_dehs,
2774 const char *records, int paste_size);
2775 /* ibalance.c */
2776 int balance_internal(struct tree_balance *, int, int, struct item_head *,
2777 struct buffer_head **);
2778
2779 /* do_balance.c */
2780 void do_balance_mark_leaf_dirty(struct tree_balance *tb,
2781 struct buffer_head *bh, int flag);
2782 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
2783 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
2784
2785 void do_balance(struct tree_balance *tb, struct item_head *ih,
2786 const char *body, int flag);
2787 void reiserfs_invalidate_buffer(struct tree_balance *tb,
2788 struct buffer_head *bh);
2789
2790 int get_left_neighbor_position(struct tree_balance *tb, int h);
2791 int get_right_neighbor_position(struct tree_balance *tb, int h);
2792 void replace_key(struct tree_balance *tb, struct buffer_head *, int,
2793 struct buffer_head *, int);
2794 void make_empty_node(struct buffer_info *);
2795 struct buffer_head *get_FEB(struct tree_balance *);
2796
2797 /* bitmap.c */
2798
2799 /* this structure carries hints for the block allocator and acts as a container
2800  * for arguments such as the node, search path, transaction_handle, etc. */
2801 struct __reiserfs_blocknr_hint {
2802 struct inode *inode; /* inode passed to the allocator, if we allocate unformatted nodes */
2803 sector_t block; /* file offset, in blocks */
2804 struct in_core_key key;
2805 struct treepath *path; /* search path, used by the allocator to determine search_start in
2806 * various ways */
2807 struct reiserfs_transaction_handle *th; /* transaction handle is needed to log super blocks and
2808 * bitmap blocks changes */
2809 b_blocknr_t beg, end;
2810 b_blocknr_t search_start; /* a field used to transfer search start value (block number)
2811 * between different block allocator procedures
2812 * (determine_search_start() and others) */
2813 int prealloc_size; /* set in determine_prealloc_size(), used by the lower-level
2814 * functions that do the actual allocation */
2815
2816 unsigned formatted_node:1; /* the allocator uses different policies for getting disk space for
2817 * formatted/unformatted blocks with/without preallocation */
2818 unsigned preallocate:1;
2819 };
2820
2821 typedef struct __reiserfs_blocknr_hint reiserfs_blocknr_hint_t;
2822
2823 int reiserfs_parse_alloc_options(struct super_block *, char *);
2824 void reiserfs_init_alloc_options(struct super_block *s);
2825
2826 /*
2827 * given a directory, this will tell you what packing locality
2828 * to use for a new object underneath it. The locality is returned
2829 * in disk byte order (le).
2830 */
2831 __le32 reiserfs_choose_packing(struct inode *dir);
2832
2833 int reiserfs_init_bitmap_cache(struct super_block *sb);
2834 void reiserfs_free_bitmap_cache(struct super_block *sb);
2835 void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info);
2836 struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb, unsigned int bitmap);
2837 int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
2838 void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *,
2839 b_blocknr_t, int for_unformatted);
2840 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int,
2841 int);
2842 static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
2843 b_blocknr_t * new_blocknrs,
2844 int amount_needed)
2845 {
2846 reiserfs_blocknr_hint_t hint = {
2847 .th = tb->transaction_handle,
2848 .path = tb->tb_path,
2849 .inode = NULL,
2850 .key = tb->key,
2851 .block = 0,
2852 .formatted_node = 1
2853 };
2854 return reiserfs_allocate_blocknrs(&hint, new_blocknrs, amount_needed,
2855 0);
2856 }
2857
2858 static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
2859 *th, struct inode *inode,
2860 b_blocknr_t * new_blocknrs,
2861 struct treepath *path,
2862 sector_t block)
2863 {
2864 reiserfs_blocknr_hint_t hint = {
2865 .th = th,
2866 .path = path,
2867 .inode = inode,
2868 .block = block,
2869 .formatted_node = 0,
2870 .preallocate = 0
2871 };
2872 return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0);
2873 }
2874
2875 #ifdef REISERFS_PREALLOCATE
2876 static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
2877 *th, struct inode *inode,
2878 b_blocknr_t * new_blocknrs,
2879 struct treepath *path,
2880 sector_t block)
2881 {
2882 reiserfs_blocknr_hint_t hint = {
2883 .th = th,
2884 .path = path,
2885 .inode = inode,
2886 .block = block,
2887 .formatted_node = 0,
2888 .preallocate = 1
2889 };
2890 return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0);
2891 }
2892
2893 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
2894 struct inode *inode);
2895 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th);
2896 #endif
2897
2898 /* hashes.c */
2899 __u32 keyed_hash(const signed char *msg, int len);
2900 __u32 yura_hash(const signed char *msg, int len);
2901 __u32 r5_hash(const signed char *msg, int len);
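
/*
 * Illustrative sketch only -- example_name_hash_r5() is hypothetical.  Each
 * hash function takes the raw name bytes and their length, e.g. hashing a
 * directory entry name with the r5 hash:
 */
static inline __u32 example_name_hash_r5(const char *name, int len)
{
	return r5_hash((const signed char *)name, len);
}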
2902
2903 #define reiserfs_set_le_bit __set_bit_le
2904 #define reiserfs_test_and_set_le_bit __test_and_set_bit_le
2905 #define reiserfs_clear_le_bit __clear_bit_le
2906 #define reiserfs_test_and_clear_le_bit __test_and_clear_bit_le
2907 #define reiserfs_test_le_bit test_bit_le
2908 #define reiserfs_find_next_zero_le_bit find_next_zero_bit_le
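
/*
 * Illustrative sketch only -- example_bitmap_bit_set() is hypothetical.  The
 * wrappers above operate on little-endian bitmaps, e.g. testing a bit in an
 * on-disk bitmap buffer:
 */
static inline int example_bitmap_bit_set(struct buffer_head *bh, int nr)
{
	return reiserfs_test_le_bit(nr, (unsigned long *)bh->b_data);
}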
2909
2910 /* reiserfs_truncate may sometimes need to allocate a few new blocks in
2911    order to perform the indirect2direct conversion.  People tend to
2912    assume that truncate works even on a filesystem that has no free disk
2913    space, and would complain if it could not truncate for lack of free
2914    space.  Reserving this spare space lets us avoid that worry.  500
2915    blocks is probably more than necessary, but it should be absolutely
2916    safe */
2917 #define SPARE_SPACE 500
2918
2919 /* prototypes from ioctl.c */
2920 long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
2921 long reiserfs_compat_ioctl(struct file *filp,
2922 unsigned int cmd, unsigned long arg);
2923 int reiserfs_unpack(struct inode *inode, struct file *filp);
2924