1 /*
2 ** Write ahead logging implementation copyright Chris Mason 2000
3 **
4 ** The background commits make this code very interrelated, and
5 ** overly complex. I need to rethink things a bit.... The major players:
6 **
7 ** journal_begin -- call with the number of blocks you expect to log.
8 ** If the current transaction is too
9 ** old, it will block until the current transaction is
10 ** finished, and then start a new one.
11 ** Usually, your transaction will get joined in with
12 ** previous ones for speed.
13 **
14 ** journal_join -- same as journal_begin, but won't block on the current
15 ** transaction regardless of age. Don't ever call
16 ** this. Ever. There are only two places it should be
17 ** called from, and they are both inside this file.
18 **
19 ** journal_mark_dirty -- adds blocks into this transaction. clears any flags
20 ** that might make them get sent to disk
21 ** and then marks them BH_JDirty. Puts the buffer head
22 ** into the current transaction hash.
23 **
24 ** journal_end -- if the current transaction is batchable, it does nothing.
25 ** Otherwise, it could do an async/synchronous commit, or
26 ** a full flush of all log and real blocks in the
27 ** transaction.
28 **
29 ** flush_old_commits -- if the current transaction is too old, it is ended and
30 ** commit blocks are sent to disk. Forces commit blocks
31 ** to disk for all backgrounded commits that have been
32 ** around too long.
33 ** -- Note, if you call this as an immediate flush from
34 ** within kupdate, it will ignore the immediate flag
35 **
36 ** The commit thread -- a writer process for async commits. It allows a
37 ** process to request a log flush on a task queue.
38 ** The commit will happen once the commit thread wakes up.
39 ** The benefit here is the writer (with whatever
40 ** related locks it has) doesn't have to wait for the
41 ** log blocks to hit disk if it doesn't want to.
42 */
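
/*
** A minimal usage sketch (not code from this file; jbegin_count and bh
** are placeholders) of how a caller is expected to drive the API above:
**
**   struct reiserfs_transaction_handle th ;
**   journal_begin(&th, p_s_sb, jbegin_count) ;    -- reserve log space
**   reiserfs_prepare_for_journal(p_s_sb, bh, 1) ; -- lock bh for logging
**   ... modify bh->b_data ...
**   journal_mark_dirty(&th, p_s_sb, bh) ;         -- pin bh into this transaction
**   journal_end(&th, p_s_sb, jbegin_count) ;      -- batch, commit, or flush
**
** do_journal_release below uses this same pattern on the super block's
** buffer to force its way into the flush code at unmount.
*/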
43
44 #include <linux/config.h>
45 #include <asm/uaccess.h>
46 #include <asm/system.h>
47
48 #include <linux/sched.h>
49 #include <asm/semaphore.h>
50
51 #include <linux/vmalloc.h>
52 #include <linux/reiserfs_fs.h>
53
54 #include <linux/kernel.h>
55 #include <linux/errno.h>
56 #include <linux/fcntl.h>
57 #include <linux/locks.h>
58 #include <linux/stat.h>
59 #include <linux/string.h>
60 #include <linux/smp_lock.h>
61
62 /* the number of mounted filesystems. This is used to decide when to
63 ** start and kill the commit thread
64 */
65 static int reiserfs_mounted_fs_count = 0 ;
66
67 /* wake this up when you add something to the commit thread task queue */
68 DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_wait) ;
69
70 /* wait on this if you need to be sure your task queue entries have been run */
71 static DECLARE_WAIT_QUEUE_HEAD(reiserfs_commit_thread_done) ;
72 DECLARE_TASK_QUEUE(reiserfs_commit_thread_tq) ;
73
74 #define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
75 structs at 4k */
76 #define BUFNR 64 /*read ahead */
77
78 /* cnode stat bits. Move these into reiserfs_fs.h */
79
80 #define BLOCK_FREED 2 /* this block was freed, and can't be written. */
81 #define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */
82
83 #define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */
84
85 /* flags for do_journal_end */
86 #define FLUSH_ALL 1 /* flush commit and real blocks */
87 #define COMMIT_NOW 2 /* end and commit this transaction */
88 #define WAIT 4 /* wait for the log blocks to hit the disk*/
89
90 /* state bits for the journal */
91 #define WRITERS_BLOCKED 1 /* set when new writers not allowed */
92
93 static int do_journal_end(struct reiserfs_transaction_handle *,struct super_block *,unsigned long nblocks,int flags) ;
94 static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
95 static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
96 static int can_dirty(struct reiserfs_journal_cnode *cn) ;
97 static int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed);
98 static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
99 static int release_journal_dev( struct super_block *super,
100 struct reiserfs_journal *journal );
101 static void init_journal_hash(struct super_block *p_s_sb) {
102 memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
103 }
104
105 /*
106 ** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
107 ** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
108 ** more details.
109 */
110 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
111 if (bh) {
112 clear_bit(BH_Dirty, &bh->b_state) ;
113 refile_buffer(bh) ;
114 }
115 return 0 ;
116 }
117
118 static struct reiserfs_bitmap_node *
119 allocate_bitmap_node(struct super_block *p_s_sb) {
120 struct reiserfs_bitmap_node *bn ;
121 static int id = 0 ;
122
123 bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
124 if (!bn) {
125 return NULL ;
126 }
127 bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
128 if (!bn->data) {
129 reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
130 return NULL ;
131 }
132 bn->id = id++ ;
133 memset(bn->data, 0, p_s_sb->s_blocksize) ;
134 INIT_LIST_HEAD(&bn->list) ;
135 return bn ;
136 }
137
138 static struct reiserfs_bitmap_node *
139 get_bitmap_node(struct super_block *p_s_sb) {
140 struct reiserfs_bitmap_node *bn = NULL;
141 struct list_head *entry = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
142
143 SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes++ ;
144 repeat:
145
146 if(entry != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
147 bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
148 list_del(entry) ;
149 memset(bn->data, 0, p_s_sb->s_blocksize) ;
150 SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
151 return bn ;
152 }
153 bn = allocate_bitmap_node(p_s_sb) ;
154 if (!bn) {
155 yield();
156 goto repeat ;
157 }
158 return bn ;
159 }
160 static inline void free_bitmap_node(struct super_block *p_s_sb,
161 struct reiserfs_bitmap_node *bn) {
162 SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes-- ;
163 if (SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
164 reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
165 reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
166 } else {
167 list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
168 SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
169 }
170 }
171
172 static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
173 int i ;
174 struct reiserfs_bitmap_node *bn = NULL ;
175 for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
176 bn = allocate_bitmap_node(p_s_sb) ;
177 if (bn) {
178 list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
179 SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
180 } else {
181 break ; // this is ok, we'll try again when more are needed
182 }
183 }
184 }
185
186 static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
187 struct reiserfs_list_bitmap *jb) {
188 int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
189 int bit_nr = block % (p_s_sb->s_blocksize << 3) ;
190
191 if (!jb->bitmaps[bmap_nr]) {
192 jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
193 }
194 set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
195 return 0 ;
196 }
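
/* worked example, assuming a 4k blocksize: each bitmap node covers
** s_blocksize << 3 == 32768 blocks, so block 40000 lands in node
** bmap_nr = 40000 / 32768 = 1, at bit_nr = 40000 % 32768 = 7232
*/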
197
198 static void cleanup_bitmap_list(struct super_block *p_s_sb,
199 struct reiserfs_list_bitmap *jb) {
200 int i;
201 if (jb->bitmaps == NULL)
202 return ;
203
204 for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
205 if (jb->bitmaps[i]) {
206 free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
207 jb->bitmaps[i] = NULL ;
208 }
209 }
210 }
211
212 /*
213 ** only call this on FS unmount.
214 */
215 static int free_list_bitmaps(struct super_block *p_s_sb,
216 struct reiserfs_list_bitmap *jb_array) {
217 int i ;
218 struct reiserfs_list_bitmap *jb ;
219 for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
220 jb = jb_array + i ;
221 jb->journal_list = NULL ;
222 cleanup_bitmap_list(p_s_sb, jb) ;
223 vfree(jb->bitmaps) ;
224 jb->bitmaps = NULL ;
225 }
226 return 0;
227 }
228
229 static int free_bitmap_nodes(struct super_block *p_s_sb) {
230 struct list_head *next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
231 struct reiserfs_bitmap_node *bn ;
232
233 while(next != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
234 bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
235 list_del(next) ;
236 reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
237 reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
238 next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
239 SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
240 }
241
242 return 0 ;
243 }
244
245 /*
246 ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
247 ** jb_array is the array to be filled in.
248 */
249 int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
250 struct reiserfs_list_bitmap *jb_array,
251 int bmap_nr) {
252 int i ;
253 int failed = 0 ;
254 struct reiserfs_list_bitmap *jb ;
255 int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;
256
257 for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
258 jb = jb_array + i ;
259 jb->journal_list = NULL ;
260 jb->bitmaps = vmalloc( mem ) ;
261 if (!jb->bitmaps) {
262 reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists\n") ;
263 failed = 1;
264 break ;
265 }
266 memset(jb->bitmaps, 0, mem) ;
267 }
268 if (failed) {
269 free_list_bitmaps(p_s_sb, jb_array) ;
270 return -1 ;
271 }
272 return 0 ;
273 }
274
275 /*
276 ** find an available list bitmap. If you can't find one, flush a commit list
277 ** and try again
278 */
279 static struct reiserfs_list_bitmap *
280 get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
281 int i,j ;
282 struct reiserfs_list_bitmap *jb = NULL ;
283
284 for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
285 i = SB_JOURNAL(p_s_sb)->j_list_bitmap_index ;
286 SB_JOURNAL(p_s_sb)->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
287 jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
288 if (SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
289 flush_commit_list(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list, 1) ;
290 if (!SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
291 break ;
292 }
293 } else {
294 break ;
295 }
296 }
297 if (jb->journal_list) { /* double check to make sure it flushed correctly */
298 return NULL ;
299 }
300 jb->journal_list = jl ;
301 return jb ;
302 }
303
304 /*
305 ** allocates a new chunk of X nodes, and links them all together as a list.
306 ** Uses the cnode->next and cnode->prev pointers
307 ** returns NULL on failure
308 */
309 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
310 struct reiserfs_journal_cnode *head ;
311 int i ;
312 if (num_cnodes <= 0) {
313 return NULL ;
314 }
315 head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
316 if (!head) {
317 return NULL ;
318 }
319 memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
320 head[0].prev = NULL ;
321 head[0].next = head + 1 ;
322 for (i = 1 ; i < num_cnodes; i++) {
323 head[i].prev = head + (i - 1) ;
324 head[i].next = head + (i + 1) ; /* if last one, overwrite it after the loop */
325 }
326 head[num_cnodes -1].next = NULL ;
327 return head ;
328 }
329
330 /*
331 ** pulls a cnode off the free list, or returns NULL on failure
332 */
333 static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
334 struct reiserfs_journal_cnode *cn ;
335
336 reiserfs_check_lock_depth("get_cnode") ;
337
338 if (SB_JOURNAL(p_s_sb)->j_cnode_free <= 0) {
339 return NULL ;
340 }
341 SB_JOURNAL(p_s_sb)->j_cnode_used++ ;
342 SB_JOURNAL(p_s_sb)->j_cnode_free-- ;
343 cn = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
344 if (!cn) {
345 return cn ;
346 }
347 if (cn->next) {
348 cn->next->prev = NULL ;
349 }
350 SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn->next ;
351 memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
352 return cn ;
353 }
354
355 /*
356 ** returns a cnode to the free list
357 */
358 static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
359
360 reiserfs_check_lock_depth("free_cnode") ;
361
362 SB_JOURNAL(p_s_sb)->j_cnode_used-- ;
363 SB_JOURNAL(p_s_sb)->j_cnode_free++ ;
364 /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
365 cn->next = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
366 if (SB_JOURNAL(p_s_sb)->j_cnode_free_list) {
367 SB_JOURNAL(p_s_sb)->j_cnode_free_list->prev = cn ;
368 }
369 cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
370 SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn ;
371 }
372
373 static int clear_prepared_bits(struct buffer_head *bh) {
374 clear_bit(BH_JPrepared, &bh->b_state) ;
375 return 0 ;
376 }
377
378 /* buffer is in current transaction */
379 inline int buffer_journaled(const struct buffer_head *bh) {
380 if (bh)
381 return test_bit(BH_JDirty, &((struct buffer_head *)bh)->b_state) ;
382 else
383 return 0 ;
384 }
385
386 /* disk block was taken off free list before being in a finished transaction, or written to disk
387 ** journal_new blocks can be reused immediately, for any purpose
388 */
389 inline int buffer_journal_new(const struct buffer_head *bh) {
390 if (bh)
391 return test_bit(BH_JNew, &((struct buffer_head *)bh)->b_state) ;
392 else
393 return 0 ;
394 }
395
396 inline int mark_buffer_journal_new(struct buffer_head *bh) {
397 if (bh) {
398 set_bit(BH_JNew, &bh->b_state) ;
399 }
400 return 0 ;
401 }
402
403 inline int mark_buffer_not_journaled(struct buffer_head *bh) {
404 if (bh)
405 clear_bit(BH_JDirty, &bh->b_state) ;
406 return 0 ;
407 }
408
409 /* utility function to force a BUG if it is called without the big
410 ** kernel lock held. caller is the string printed just before calling BUG()
411 */
412 void reiserfs_check_lock_depth(char *caller) {
413 #ifdef CONFIG_SMP
414 if (current->lock_depth < 0) {
415 printk("%s called without kernel lock held\n", caller) ;
416 show_reiserfs_locks() ;
417 BUG() ;
418 }
419 #else
420 ;
421 #endif
422 }
423
424 /* return a cnode with same dev, block number and size in table, or null if not found */
425 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct reiserfs_journal_cnode **table,
426 kdev_t dev,long bl,int size) {
427 struct reiserfs_journal_cnode *cn ;
428 cn = journal_hash(table, dev, bl) ;
429 while(cn) {
430 if ((cn->blocknr == bl) && (cn->dev == dev))
431 return cn ;
432 cn = cn->hnext ;
433 }
434 return (struct reiserfs_journal_cnode *)0 ;
435 }
436
437 /* returns a cnode with same size, block number and dev as bh in the current transaction hash. NULL if not found */
438 static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
439 struct reiserfs_journal_cnode *cn ;
440 if (bh) {
441 cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_dev, bh->b_blocknr, bh->b_size) ;
442 }
443 else {
444 return (struct reiserfs_journal_cnode *)0 ;
445 }
446 return cn ;
447 }
448
449 /* once upon a time, the journal would deadlock. a lot. Now, when
450 ** CONFIG_REISERFS_CHECK is defined, anytime someone enters a
451 ** transaction, it pushes itself into this ugly static list, and pops
452 ** itself off before calling journal_end. I made a SysRq key to dump
453 ** the list, and tell me what the writers are when I'm deadlocked. */
454
455 /* are you depending on the compiler
456 to optimize this function away
457 everywhere it is called? It is not
458 obvious how this works, but I
459 suppose debugging code need not be
460 clear. -Hans */
461 static char *journal_writers[512] ;
462 int push_journal_writer(char *s) {
463 #ifdef CONFIG_REISERFS_CHECK
464 int i ;
465 for (i = 0 ; i < 512 ; i++) {
466 if (!journal_writers[i]) {
467 journal_writers[i] = s ;
468 return i ;
469 }
470 }
471 return -1 ;
472 #else
473 return 0 ;
474 #endif
475 }
476 int pop_journal_writer(int index) {
477 #ifdef CONFIG_REISERFS_CHECK
478 if (index >= 0) {
479 journal_writers[index] = NULL ;
480 }
481 #endif
482 return 0 ;
483 }
484
485 int dump_journal_writers(void) {
486 int i ;
487 for (i = 0 ; i < 512 ; i++) {
488 if (journal_writers[i]) {
489 printk("%d: %s\n", i, journal_writers[i]) ;
490 }
491 }
492 return 0 ;
493 }
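
/* sketch of the intended calling pattern -- hypothetical caller, and only
** useful when CONFIG_REISERFS_CHECK is defined:
**
**   int windex = push_journal_writer("my_caller") ;
**   ... do the work that might deadlock ...
**   pop_journal_writer(windex) ;
*/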
494
495 /*
496 ** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
497 ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
498 ** being overwritten by a replay after crashing.
499 **
500 ** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
501 ** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
502 ** sure you never write the block without logging it.
503 **
504 ** next_zero_bit is a suggestion about the next block to try for find_forward.
505 ** when bl is rejected because it is set in a journal list bitmap, we search
506 ** for the next zero bit in the bitmap that rejected bl. Then, we return that
507 ** through next_zero_bit for find_forward to try.
508 **
509 ** Just because we return something in next_zero_bit does not mean we won't
510 ** reject it on the next call to reiserfs_in_journal
511 **
512 */
513 int reiserfs_in_journal(struct super_block *p_s_sb, kdev_t dev,
514 int bmap_nr, int bit_nr, int size, int search_all,
515 unsigned int *next_zero_bit) {
516 struct reiserfs_journal_cnode *cn ;
517 struct reiserfs_list_bitmap *jb ;
518 int i ;
519 unsigned long bl;
520
521 *next_zero_bit = 0 ; /* always start this at zero. */
522
523 /* we aren't logging, all blocks are safe for reuse */
524 if (reiserfs_dont_log(p_s_sb)) {
525 return 0 ;
526 }
527
528 PROC_INFO_INC( p_s_sb, journal.in_journal );
529 /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
530 ** if we crash before the transaction that freed it commits, this transaction won't
531 ** have committed either, and the block will never be written
532 */
533 if (search_all) {
534 for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
535 PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
536 jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
537 if (jb->journal_list && jb->bitmaps[bmap_nr] &&
538 test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
539 *next_zero_bit = find_next_zero_bit((unsigned long *)
540 (jb->bitmaps[bmap_nr]->data),
541 p_s_sb->s_blocksize << 3, bit_nr+1) ;
542 return 1 ;
543 }
544 }
545 }
546
547 bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
548 /* is it in any old transactions? */
549 if (search_all && (cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, dev,bl,size))) {
550 return 1;
551 }
552
553 /* is it in the current transaction? This should never happen */
554 if ((cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, dev,bl,size))) {
555 return 1;
556 }
557
558 PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
559 /* safe for reuse */
560 return 0 ;
561 }
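
/* sketch of how a block allocator is expected to use next_zero_bit --
** a simplified, hypothetical caller, not the real find_forward code:
**
**   unsigned int next ;
**   if (reiserfs_in_journal(p_s_sb, p_s_sb->s_dev, bmap_nr, bit_nr,
**                           p_s_sb->s_blocksize, 1, &next)) {
**     bit_nr = next ; -- retry from the hint instead of bit_nr + 1
**   }
*/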
562
563 /* insert cn into table
564 */
565 inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
566 struct reiserfs_journal_cnode *cn_orig ;
567
568 cn_orig = journal_hash(table, cn->dev, cn->blocknr) ;
569 cn->hnext = cn_orig ;
570 cn->hprev = NULL ;
571 if (cn_orig) {
572 cn_orig->hprev = cn ;
573 }
574 journal_hash(table, cn->dev, cn->blocknr) = cn ;
575 }
576
577 /* lock the current transaction */
578 inline static void lock_journal(struct super_block *p_s_sb) {
579 PROC_INFO_INC( p_s_sb, journal.lock_journal );
580 while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)) > 0) {
581 PROC_INFO_INC( p_s_sb, journal.lock_journal_wait );
582 sleep_on(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
583 }
584 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 1) ;
585 }
586
587 /* unlock the current transaction */
588 inline static void unlock_journal(struct super_block *p_s_sb) {
589 atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wlock)) ;
590 wake_up(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
591 }
592
593 /*
594 ** this used to be much more involved, and I'm keeping it just in case things get ugly again.
595 ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
596 ** transaction.
597 */
598 static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
599
600 struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
601 if (jb) {
602 cleanup_bitmap_list(p_s_sb, jb) ;
603 }
604 jl->j_list_bitmap->journal_list = NULL ;
605 jl->j_list_bitmap = NULL ;
606 }
607
608 /*
609 ** if this journal list still has commit blocks unflushed, send them to disk.
610 **
611 ** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
612 ** Before the commit block can be written, every other log block must be safely on disk
613 **
614 */
615 static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
616 int i, count ;
617 int index = 0 ;
618 int bn ;
619 int retry_count = 0 ;
620 int orig_commit_left = 0 ;
621 struct buffer_head *tbh = NULL ;
622 struct reiserfs_journal_list *other_jl ;
623
624 reiserfs_check_lock_depth("flush_commit_list") ;
625
626 if (atomic_read(&jl->j_older_commits_done)) {
627 return 0 ;
628 }
629
630 /* before we can put our commit blocks on disk, we have to make sure everyone older than
631 ** us is on disk too
632 */
633 if (jl->j_len <= 0) {
634 return 0 ;
635 }
636 if (flushall) {
637 /* we _must_ make sure the transactions are committed in order. Start with the
638 ** index after this one, wrap all the way around
639 */
640 index = (jl - SB_JOURNAL_LIST(s)) + 1 ;
641 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
642 other_jl = SB_JOURNAL_LIST(s) + ( (index + i) % JOURNAL_LIST_COUNT) ;
643 if (other_jl && other_jl != jl && other_jl->j_len > 0 && other_jl->j_trans_id > 0 &&
644 other_jl->j_trans_id <= jl->j_trans_id && (atomic_read(&(jl->j_older_commits_done)) == 0)) {
645 flush_commit_list(s, other_jl, 0) ;
646 }
647 }
648 }
649
650 count = 0 ;
651 /* don't flush the commit list for the current transaction */
652 if (jl == ((SB_JOURNAL_LIST(s) + SB_JOURNAL_LIST_INDEX(s)))) {
653 return 0 ;
654 }
655
656 /* make sure nobody is trying to flush this one at the same time */
657 if (atomic_read(&(jl->j_commit_flushing))) {
658 sleep_on(&(jl->j_commit_wait)) ;
659 if (flushall) {
660 atomic_set(&(jl->j_older_commits_done), 1) ;
661 }
662 return 0 ;
663 }
664
665 /* this commit is done, exit */
666 if (atomic_read(&(jl->j_commit_left)) <= 0) {
667 if (flushall) {
668 atomic_set(&(jl->j_older_commits_done), 1) ;
669 }
670 return 0 ;
671 }
672 /* keeps others from flushing while we are flushing */
673 atomic_set(&(jl->j_commit_flushing), 1) ;
674
675
676 if (jl->j_len > SB_JOURNAL_TRANS_MAX(s)) {
677 reiserfs_panic(s, "journal-512: flush_commit_list: length is %lu, list number %d\n", jl->j_len, jl - SB_JOURNAL_LIST(s)) ;
678 return 0 ;
679 }
680
681 orig_commit_left = atomic_read(&(jl->j_commit_left)) ;
682
683 /* start by checking all the commit blocks in this transaction.
684 ** Add anyone not on disk into tbh. Stop checking once commit_left <= 1, because that means we
685 ** only have the commit block left
686 */
687 retry:
688 count = 0 ;
689 for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
690 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) % SB_ONDISK_JOURNAL_SIZE(s);
691 tbh = journal_get_hash_table(s, bn) ;
692
693 /* kill this sanity check */
694 if (count > (orig_commit_left + 2)) {
695 reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_left(%d)!\n", count, orig_commit_left) ;
696 }
697 if (tbh) {
698 if (buffer_locked(tbh)) { /* wait on it, redo it just to make sure */
699 wait_on_buffer(tbh) ;
700 if (!buffer_uptodate(tbh)) {
701 reiserfs_panic(s, "journal-584, buffer write failed\n") ;
702 }
703 }
704 if (buffer_dirty(tbh)) {
705 reiserfs_warning(s, "journal-569: flush_commit_list, block already dirty!\n") ;
706 } else {
707 mark_buffer_dirty(tbh) ;
708 }
709 ll_rw_block(WRITE, 1, &tbh) ;
710 count++ ;
711 put_bh(tbh) ; /* once for our get_hash */
712 }
713 }
714
715 /* wait on everyone in tbh before writing commit block*/
716 if (count > 0) {
717 for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 &&
718 i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
719 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
720 tbh = journal_get_hash_table(s, bn) ;
721
722 wait_on_buffer(tbh) ;
723 if (!buffer_uptodate(tbh)) {
724 reiserfs_panic(s, "journal-601, buffer write failed\n") ;
725 }
726 put_bh(tbh) ; /* once for our get_hash */
727 bforget(tbh) ; /* once due to original getblk in do_journal_end */
728 atomic_dec(&(jl->j_commit_left)) ;
729 }
730 }
731
732 if (atomic_read(&(jl->j_commit_left)) != 1) { /* just the commit_bh left, flush it without calling getblk for everyone */
733 if (retry_count < 2) {
734 reiserfs_warning(s, "journal-582: flush_commit_list, not all log blocks on disk yet, trying again\n") ;
735 retry_count++ ;
736 goto retry;
737 }
738 reiserfs_panic(s, "journal-563: flush_commit_list: BAD, j_commit_left is %u, should be 1\n",
739 atomic_read(&(jl->j_commit_left)));
740 }
741
742 mark_buffer_dirty(jl->j_commit_bh) ;
743 ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
744 wait_on_buffer(jl->j_commit_bh) ;
745 if (!buffer_uptodate(jl->j_commit_bh)) {
746 reiserfs_panic(s, "journal-615: buffer write failed\n") ;
747 }
748 atomic_dec(&(jl->j_commit_left)) ;
749 bforget(jl->j_commit_bh) ;
750
751 /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
752 cleanup_freed_for_journal_list(s, jl) ;
753
754 if (flushall) {
755 atomic_set(&(jl->j_older_commits_done), 1) ;
756 }
757 atomic_set(&(jl->j_commit_flushing), 0) ;
758 wake_up(&(jl->j_commit_wait)) ;
759
760 s->s_dirt = 1 ;
761 return 0 ;
762 }
763
764 /*
765 ** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
766 ** returns NULL if it can't find anything
767 */
768 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
769 kdev_t dev = cn->dev;
770 unsigned long blocknr = cn->blocknr ;
771
772 cn = cn->hprev ;
773 while(cn) {
774 if (cn->dev == dev && cn->blocknr == blocknr && cn->jlist) {
775 return cn->jlist ;
776 }
777 cn = cn->hprev ;
778 }
779 return NULL ;
780 }
781
782
783 /*
784 ** once all the real blocks have been flushed, it is safe to remove them from the
785 ** journal list for this transaction. Aside from freeing the cnode, this also allows the
786 ** block to be reallocated for data blocks if it had been deleted.
787 */
788 static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
789 struct buffer_head fake_bh ;
790 struct reiserfs_journal_cnode *cn, *last ;
791 cn = jl->j_realblock ;
792
793 /* which is better, to lock once around the whole loop, or
794 ** to lock for each call to remove_from_journal_list?
795 */
796 while(cn) {
797 if (cn->blocknr != 0) {
798 if (debug) {
799 reiserfs_warning(p_s_sb, "block %lu, bh is %d, state %ld\n", cn->blocknr, cn->bh ? 1: 0,
800 cn->state) ;
801 }
802 fake_bh.b_blocknr = cn->blocknr ;
803 fake_bh.b_dev = cn->dev ;
804 cn->state = 0 ;
805 remove_from_journal_list(p_s_sb, jl, &fake_bh, 1) ;
806 }
807 last = cn ;
808 cn = cn->next ;
809 free_cnode(p_s_sb, last) ;
810 }
811 jl->j_realblock = NULL ;
812 }
813
814 /*
815 ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
816 ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
817 ** releasing blocks in this transaction for reuse as data blocks.
818 ** called by flush_journal_list, before it calls remove_all_from_journal_list
819 **
820 */
821 static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
822 struct reiserfs_journal_header *jh ;
823 if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
824 if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) {
825 wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
826 if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
827 reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ;
828 }
829 }
830 SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
831 SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ;
832 jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
833 jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
834 jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
835 jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
836 set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ;
837 ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
838 wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
839 if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
840 reiserfs_warning( p_s_sb, "reiserfs: journal-837: IO error during journal replay\n" );
841 return -EIO ;
842 }
843 }
844 return 0 ;
845 }
846
847 static int update_journal_header_block(struct super_block *p_s_sb,
848 unsigned long offset,
849 unsigned long trans_id) {
850 if (_update_journal_header_block(p_s_sb, offset, trans_id)) {
851 reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ;
852 }
853 return 0 ;
854 }
855 /*
856 ** flush any and all journal lists older than you are
857 ** can only be called from flush_journal_list
858 */
859 static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, unsigned long trans_id) {
860 int i, index ;
861 struct reiserfs_journal_list *other_jl ;
862
863 index = jl - SB_JOURNAL_LIST(p_s_sb) ;
864 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
865 other_jl = SB_JOURNAL_LIST(p_s_sb) + ((index + i) % JOURNAL_LIST_COUNT) ;
866 if (other_jl && other_jl->j_len > 0 &&
867 other_jl->j_trans_id > 0 &&
868 other_jl->j_trans_id < trans_id &&
869 other_jl != jl) {
870 /* do not flush all */
871 flush_journal_list(p_s_sb, other_jl, 0) ;
872 }
873 }
874 return 0 ;
875 }
876
877 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
878 if (buffer_journaled(bh)) {
879 reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk\n",
880 bh->b_blocknr, kdevname(bh->b_dev)) ;
881 }
882 mark_buffer_uptodate(bh, uptodate) ;
883 unlock_buffer(bh) ;
884 put_bh(bh) ;
885 }
886 static void submit_logged_buffer(struct buffer_head *bh) {
887 lock_buffer(bh) ;
888 get_bh(bh) ;
889 bh->b_end_io = reiserfs_end_buffer_io_sync ;
890 mark_buffer_notjournal_new(bh) ;
891 clear_bit(BH_Dirty, &bh->b_state) ;
892 submit_bh(WRITE, bh) ;
893 }
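
/* note on the two helpers above: BH_Dirty is cleared before submit_bh so
** the generic buffer writeback won't queue the block a second time, and
** reiserfs_end_buffer_io_sync warns if a still-journaled (pinned) buffer
** somehow made it to disk
*/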
894
895 /* flush a journal list, both commit and real blocks
896 **
897 ** always set flushall to 1, unless you are calling from inside
898 ** flush_journal_list
899 **
900 ** IMPORTANT. This can only be called while there are no journal writers,
901 ** and the journal is locked. That means it can only be called from
902 ** do_journal_end, or by journal_release
903 */
904 static int flush_journal_list(struct super_block *s,
905 struct reiserfs_journal_list *jl, int flushall) {
906 struct reiserfs_journal_list *pjl ;
907 struct reiserfs_journal_cnode *cn, *last ;
908 int count ;
909 int was_jwait = 0 ;
910 int was_dirty = 0 ;
911 struct buffer_head *saved_bh ;
912 unsigned long j_len_saved = jl->j_len ;
913
914 if (j_len_saved <= 0) {
915 return 0 ;
916 }
917
918 if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) {
919 reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d\n",
920 atomic_read(&SB_JOURNAL(s)->j_wcount)) ;
921 }
922 /* if someone is getting the commit list, we must wait for them */
923 while (atomic_read(&(jl->j_commit_flushing))) {
924 sleep_on(&(jl->j_commit_wait)) ;
925 }
926 /* if someone is flushing this list, we must wait for them */
927 while (atomic_read(&(jl->j_flushing))) {
928 sleep_on(&(jl->j_flush_wait)) ;
929 }
930
931 /* this list is now ours, we can change anything we want */
932 atomic_set(&(jl->j_flushing), 1) ;
933
934 count = 0 ;
935 if (j_len_saved > SB_JOURNAL_TRANS_MAX(s)) {
936 reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ;
937 atomic_dec(&(jl->j_flushing)) ;
938 return 0 ;
939 }
940
941 /* if all the work is already done, get out of here */
942 if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
943 atomic_read(&(jl->j_commit_left)) <= 0) {
944 goto flush_older_and_return ;
945 }
946
947 /* start by putting the commit list on disk. This will also flush
948 ** the commit lists of any older transactions
949 */
950 flush_commit_list(s, jl, 1) ;
951
952 /* are we done now? */
953 if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
954 atomic_read(&(jl->j_commit_left)) <= 0) {
955 goto flush_older_and_return ;
956 }
957
958 /* loop through each cnode, see if we need to write it,
959 ** or wait on a more recent transaction, or just ignore it
960 */
961 if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) {
962 reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
963 }
964 cn = jl->j_realblock ;
965 while(cn) {
966 was_jwait = 0 ;
967 was_dirty = 0 ;
968 saved_bh = NULL ;
969 /* blocknr of 0 is no longer in the hash, ignore it */
970 if (cn->blocknr == 0) {
971 goto free_cnode ;
972 }
973 pjl = find_newer_jl_for_cn(cn) ;
974 /* the order is important here. We check pjl to make sure we
975 ** don't clear BH_JDirty_wait if we aren't the one writing this
976 ** block to disk
977 */
978 if (!pjl && cn->bh) {
979 saved_bh = cn->bh ;
980
981 /* we do this to make sure nobody releases the buffer while
982 ** we are working with it
983 */
984 get_bh(saved_bh) ;
985
986 if (buffer_journal_dirty(saved_bh)) {
987 was_jwait = 1 ;
988 mark_buffer_notjournal_dirty(saved_bh) ;
989 /* undo the inc from journal_mark_dirty */
990 put_bh(saved_bh) ;
991 }
992 if (can_dirty(cn)) {
993 was_dirty = 1 ;
994 }
995 }
996
997 /* if someone has this block in a newer transaction, just make
998 ** sure they are committed, and don't try writing it to disk
999 */
1000 if (pjl) {
1001 flush_commit_list(s, pjl, 1) ;
1002 goto free_cnode ;
1003 }
1004
1005 /* bh == NULL when the block got to disk on its own, OR,
1006 ** the block got freed in a future transaction
1007 */
1008 if (saved_bh == NULL) {
1009 goto free_cnode ;
1010 }
1011
1012 /* this should never happen. kupdate_one_transaction has this list
1013 ** locked while it works, so we should never see a buffer here that
1014 ** is not marked JDirty_wait
1015 */
1016 if ((!was_jwait) && !buffer_locked(saved_bh)) {
1017 reiserfs_warning(s, "journal-813: BAD! buffer %lu %cdirty %cjwait, not in a newer transaction\n", saved_bh->b_blocknr,
1018 was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
1019 }
1020 /* kupdate_one_transaction waits on the buffers it is writing, so we
1021 ** should never see locked buffers here
1022 */
1023 if (buffer_locked(saved_bh)) {
1024 reiserfs_warning(s, "clm-2083: locked buffer %lu in flush_journal_list\n",
1025 saved_bh->b_blocknr) ;
1026 wait_on_buffer(saved_bh) ;
1027 if (!buffer_uptodate(saved_bh)) {
1028 reiserfs_panic(s, "journal-923: buffer write failed\n") ;
1029 }
1030 }
1031 if (was_dirty) {
1032 /* we inc again because saved_bh gets decremented at free_cnode */
1033 get_bh(saved_bh) ;
1034 set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
1035 submit_logged_buffer(saved_bh) ;
1036 count++ ;
1037 } else {
1038 reiserfs_warning(s, "clm-2082: Unable to flush buffer %lu in flush_journal_list\n",
1039 saved_bh->b_blocknr) ;
1040 }
1041 free_cnode:
1042 last = cn ;
1043 cn = cn->next ;
1044 if (saved_bh) {
1045 /* we incremented this to keep others from taking the buffer head away */
1046 put_bh(saved_bh) ;
1047 if (atomic_read(&(saved_bh->b_count)) < 0) {
1048 reiserfs_warning(s, "journal-945: saved_bh->b_count < 0\n") ;
1049 }
1050 }
1051 }
1052 if (count > 0) {
1053 cn = jl->j_realblock ;
1054 while(cn) {
1055 if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1056 if (!cn->bh) {
1057 reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
1058 }
1059 wait_on_buffer(cn->bh) ;
1060 if (!cn->bh) {
1061 reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
1062 }
1063 if (!buffer_uptodate(cn->bh)) {
1064 reiserfs_panic(s, "journal-949: buffer write failed\n") ;
1065 }
1066 refile_buffer(cn->bh) ;
1067 brelse(cn->bh) ;
1068 }
1069 cn = cn->next ;
1070 }
1071 }
1072
1073 flush_older_and_return:
1074 /* before we can update the journal header block, we _must_ flush all
1075 ** real blocks from all older transactions to disk. This is because
1076 ** once the header block is updated, this transaction will not be
1077 ** replayed after a crash
1078 */
1079 if (flushall) {
1080 flush_older_journal_lists(s, jl, jl->j_trans_id) ;
1081 }
1082
1083 /* before we can remove everything from the hash tables for this
1084 ** transaction, we must make sure it can never be replayed
1085 **
1086 ** since we are only called from do_journal_end, we know for sure there
1087 ** are no allocations going on while we are flushing journal lists. So,
1088 ** we only need to update the journal header block for the last list
1089 ** being flushed
1090 */
1091 if (flushall) {
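    /* j_start + j_len + 2 steps past this transaction's log blocks plus
    ** its description and commit blocks, i.e. the first block of the next
    ** transaction in the log
    */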
1092 update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
1093 }
1094 remove_all_from_journal_list(s, jl, 0) ;
1095 jl->j_len = 0 ;
1096 atomic_set(&(jl->j_nonzerolen), 0) ;
1097 jl->j_start = 0 ;
1098 jl->j_realblock = NULL ;
1099 jl->j_commit_bh = NULL ;
1100 jl->j_trans_id = 0 ;
1101 atomic_dec(&(jl->j_flushing)) ;
1102 wake_up(&(jl->j_flush_wait)) ;
1103 return 0 ;
1104 }
1105
1106
1107 static int kupdate_one_transaction(struct super_block *s,
1108 struct reiserfs_journal_list *jl)
1109 {
1110 struct reiserfs_journal_list *pjl ; /* previous list for this cn */
1111 struct reiserfs_journal_cnode *cn, *walk_cn ;
1112 unsigned long blocknr ;
1113 int run = 0 ;
1114 int orig_trans_id = jl->j_trans_id ;
1115 struct buffer_head *saved_bh ;
1116 int ret = 0 ;
1117
1118 /* if someone is getting the commit list, we must wait for them */
1119 while (atomic_read(&(jl->j_commit_flushing))) {
1120 sleep_on(&(jl->j_commit_wait)) ;
1121 }
1122 /* if someone is flushing this list, we must wait for them */
1123 while (atomic_read(&(jl->j_flushing))) {
1124 sleep_on(&(jl->j_flush_wait)) ;
1125 }
1126 /* was it flushed while we slept? */
1127 if (jl->j_len <= 0 || jl->j_trans_id != orig_trans_id) {
1128 return 0 ;
1129 }
1130
1131 /* this list is now ours, we can change anything we want */
1132 atomic_set(&(jl->j_flushing), 1) ;
1133
1134 loop_start:
1135 cn = jl->j_realblock ;
1136 while(cn) {
1137 saved_bh = NULL ;
1138 /* if the blocknr == 0, this has been cleared from the hash,
1139 ** skip it
1140 */
1141 if (cn->blocknr == 0) {
1142 goto next ;
1143 }
1144 /* look for a more recent transaction that logged this
1145 ** buffer. Only the most recent transaction with a buffer in
1146 ** it is allowed to send that buffer to disk
1147 */
1148 pjl = find_newer_jl_for_cn(cn) ;
1149 if (run == 0 && !pjl && cn->bh && buffer_journal_dirty(cn->bh) &&
1150 can_dirty(cn))
1151 {
1152 if (!test_bit(BH_JPrepared, &cn->bh->b_state)) {
1153 set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
1154 submit_logged_buffer(cn->bh) ;
1155 } else {
1156 /* someone else is using this buffer. We can't
1157 ** send it to disk right now because they might
1158 ** be changing/logging it.
1159 */
1160 ret = 1 ;
1161 }
1162 } else if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1163 clear_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
1164 if (!pjl && cn->bh) {
1165 wait_on_buffer(cn->bh) ;
1166 }
1167 /* check again, someone could have logged while we scheduled */
1168 pjl = find_newer_jl_for_cn(cn) ;
1169
1170 /* before the JDirty_wait bit is set, the
1171 ** buffer is added to the hash list. So, if we are
1172 ** run in the middle of a do_journal_end, we will notice
1173 ** if this buffer was logged and added from the latest
1174 ** transaction. In this case, we don't want to decrement
1175 ** b_count
1176 */
1177 if (!pjl && cn->bh && buffer_journal_dirty(cn->bh)) {
1178 blocknr = cn->blocknr ;
1179 walk_cn = cn ;
1180 saved_bh= cn->bh ;
1181 /* update all older transactions to show this block
1182 ** was flushed
1183 */
1184 mark_buffer_notjournal_dirty(cn->bh) ;
1185 while(walk_cn) {
1186 if (walk_cn->bh && walk_cn->blocknr == blocknr &&
1187 walk_cn->dev == cn->dev) {
1188 if (walk_cn->jlist) {
1189 atomic_dec(&(walk_cn->jlist->j_nonzerolen)) ;
1190 }
1191 walk_cn->bh = NULL ;
1192 }
1193 walk_cn = walk_cn->hnext ;
1194 }
1195 if (atomic_read(&saved_bh->b_count) < 1) {
1196 reiserfs_warning(s, "clm-2081: bad count on %lu\n",
1197 saved_bh->b_blocknr) ;
1198 }
1199 brelse(saved_bh) ;
1200 }
1201 }
1202 /*
1203 ** if the more recent transaction is committed to the log,
1204 ** this buffer can be considered flushed. Decrement our
1205 ** counters to reflect one less buffer that needs writing.
1206 **
1207 ** note, this relies on all of the above code being
1208 ** schedule free once pjl comes back non-null.
1209 */
1210 if (pjl && cn->bh && atomic_read(&pjl->j_commit_left) == 0) {
1211 atomic_dec(&cn->jlist->j_nonzerolen) ;
1212 cn->bh = NULL ;
1213 }
1214 next:
1215 cn = cn->next ;
1216 }
1217 /* the first run through the loop sends all the dirty buffers to
1218 ** ll_rw_block.
1219 ** the second run through the loop does all the accounting
1220 */
1221 if (run++ == 0) {
1222 goto loop_start ;
1223 }
1224
1225 atomic_set(&(jl->j_flushing), 0) ;
1226 wake_up(&(jl->j_flush_wait)) ;
1227 return ret ;
1228 }
1229 /* since we never give dirty buffers to bdflush/kupdate, we have to
1230 ** flush them ourselves. This runs through the journal lists, finds
1231 ** old metadata in need of flushing and sends it to disk.
1232 ** this does not end transactions, commit anything, or free
1233 ** cnodes.
1234 **
1235 ** returns the highest transaction id that was flushed last time
1236 */
1237 static unsigned long reiserfs_journal_kupdate(struct super_block *s) {
1238 struct reiserfs_journal_list *jl ;
1239 int i ;
1240 int start ;
1241 time_t age ;
1242 int ret = 0 ;
1243
1244 start = SB_JOURNAL_LIST_INDEX(s) ;
1245
1246 /* safety check to prevent flush attempts during a mount */
1247 if (start < 0) {
1248 return 0 ;
1249 }
1250 i = (start + 1) % JOURNAL_LIST_COUNT ;
1251 while(i != start) {
1252 jl = SB_JOURNAL_LIST(s) + i ;
1253 age = CURRENT_TIME - jl->j_timestamp ;
1254 if (jl->j_len > 0 && // age >= (JOURNAL_MAX_COMMIT_AGE * 2) &&
1255 atomic_read(&(jl->j_nonzerolen)) > 0 &&
1256 atomic_read(&(jl->j_commit_left)) == 0) {
1257
1258 if (jl->j_trans_id == SB_JOURNAL(s)->j_trans_id) {
1259 break ;
1260 }
1261 /* if ret was already 1, we want to preserve that */
1262 ret |= kupdate_one_transaction(s, jl) ;
1263 }
1264 if (atomic_read(&(jl->j_nonzerolen)) > 0) {
1265 ret |= 1 ;
1266 }
1267 i = (i + 1) % JOURNAL_LIST_COUNT ;
1268 }
1269 return ret ;
1270 }
1271
1272 /*
1273 ** removes any nodes in table with the same block and dev as bh.
1274 ** only touches the hnext and hprev pointers.
1275 */
1276 void remove_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_list *jl, struct buffer_head *bh,
1277 int remove_freed){
1278 struct reiserfs_journal_cnode *cur ;
1279 struct reiserfs_journal_cnode **head ;
1280
1281 if (!bh)
1282 return ;
1283
1284 head= &(journal_hash(table, bh->b_dev, bh->b_blocknr)) ;
1285 if (!head) {
1286 return ;
1287 }
1288 cur = *head ;
1289 while(cur) {
1290 if (cur->blocknr == bh->b_blocknr && cur->dev == bh->b_dev && (jl == NULL || jl == cur->jlist) &&
1291 (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1292 if (cur->hnext) {
1293 cur->hnext->hprev = cur->hprev ;
1294 }
1295 if (cur->hprev) {
1296 cur->hprev->hnext = cur->hnext ;
1297 } else {
1298 *head = cur->hnext ;
1299 }
1300 cur->blocknr = 0 ;
1301 cur->dev = 0 ;
1302 cur->state = 0 ;
1303 if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
1304 atomic_dec(&(cur->jlist->j_nonzerolen)) ;
1305 cur->bh = NULL ;
1306 cur->jlist = NULL ;
1307 }
1308 cur = cur->hnext ;
1309 }
1310 }
1311
1312 static void free_journal_ram(struct super_block *p_s_sb) {
1313 vfree(SB_JOURNAL(p_s_sb)->j_cnode_free_orig) ;
1314 free_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap) ;
1315 free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
1316 if (SB_JOURNAL(p_s_sb)->j_header_bh) {
1317 brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
1318 }
1319 /* j_header_bh is on the journal dev, make sure not to release the journal
1320 * dev until we brelse j_header_bh
1321 */
1322 release_journal_dev(p_s_sb, SB_JOURNAL(p_s_sb));
1323 vfree(SB_JOURNAL(p_s_sb)) ;
1324 }
1325
1326 /*
1327 ** call on unmount. Only set error to 1 if you haven't made your way out
1328 ** of read_super() yet. Any other caller must keep error at 0.
1329 */
1330 static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
1331 struct reiserfs_transaction_handle myth ;
1332
1333 /* we only want to flush out transactions if we were called with error == 0
1334 */
1335 if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1336 /* end the current trans */
1337 do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;
1338
1339 /* make sure something gets logged to force our way into the flush code */
1340 journal_join(&myth, p_s_sb, 1) ;
1341 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
1342 journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
1343 do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
1344 }
1345
1346 /* we decrement before we wake up, because the commit thread dies off
1347 ** when it has been woken up and the count is <= 0
1348 */
1349 reiserfs_mounted_fs_count-- ;
1350 wake_up(&reiserfs_commit_thread_wait) ;
1351 sleep_on(&reiserfs_commit_thread_done) ;
1352
1353 free_journal_ram(p_s_sb) ;
1354
1355 return 0 ;
1356 }
1357
1358 /*
1359 ** call on unmount. flush all journal trans, release all alloc'd ram
1360 */
1361 int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
1362 return do_journal_release(th, p_s_sb, 0) ;
1363 }
1364 /*
1365 ** only call from an error condition inside reiserfs_read_super!
1366 */
1367 int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
1368 return do_journal_release(th, p_s_sb, 1) ;
1369 }
1370
1371 /* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
1372 static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
1373 struct reiserfs_journal_commit *commit) {
1374 if (le32_to_cpu(commit->j_trans_id) != le32_to_cpu(desc->j_trans_id) ||
1375 le32_to_cpu(commit->j_len) != le32_to_cpu(desc->j_len) ||
1376 le32_to_cpu(commit->j_len) > SB_JOURNAL_TRANS_MAX(p_s_sb) ||
1377 le32_to_cpu(commit->j_len) <= 0
1378 ) {
1379 return 1 ;
1380 }
1381 return 0 ;
1382 }
1383 /* returns 0 if it did not find a description block
1384 ** returns -1 if it found a corrupt commit block
1385 ** returns 1 if both desc and commit were valid
1386 */
1387 static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
1388 struct reiserfs_journal_desc *desc ;
1389 struct reiserfs_journal_commit *commit ;
1390 struct buffer_head *c_bh ;
1391 unsigned long offset ;
1392
1393 if (!d_bh)
1394 return 0 ;
1395
1396 desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
1397 if (le32_to_cpu(desc->j_len) > 0 && !memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8)) {
1398 if (oldest_invalid_trans_id && *oldest_invalid_trans_id && le32_to_cpu(desc->j_trans_id) > *oldest_invalid_trans_id) {
1399 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
1400 "is valid returning because trans_id %d is greater than "
1401 "oldest_invalid %lu\n", le32_to_cpu(desc->j_trans_id),
1402 *oldest_invalid_trans_id);
1403 return 0 ;
1404 }
1405 if (newest_mount_id && *newest_mount_id > le32_to_cpu(desc->j_mount_id)) {
1406 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
1407 "is valid returning because mount_id %d is less than "
1408 "newest_mount_id %lu\n", desc->j_mount_id,
1409 *newest_mount_id) ;
1410 return -1 ;
1411 }
1412 if ( le32_to_cpu(desc->j_len) > SB_JOURNAL_TRANS_MAX(p_s_sb) ) {
1413 reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction\n", le32_to_cpu(desc->j_len));
1414 return -1 ;
1415 }
1416 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
1417
1418 /* ok, we have a journal description block, let's see if the transaction was valid */
1419 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1420 ((offset + le32_to_cpu(desc->j_len) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
1421 if (!c_bh)
1422 return 0 ;
1423 commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
1424 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
1425 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1426 "journal_transaction_is_valid, commit offset %ld had bad "
1427 "time %d or length %d\n",
1428 c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1429 le32_to_cpu(commit->j_trans_id),
1430 le32_to_cpu(commit->j_len));
1431 brelse(c_bh) ;
1432 if (oldest_invalid_trans_id) {
1433 *oldest_invalid_trans_id = le32_to_cpu(desc->j_trans_id) ;
1434 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
1435 "transaction_is_valid setting oldest invalid trans_id "
1436 "to %d\n", le32_to_cpu(desc->j_trans_id)) ;
1437 }
1438 return -1;
1439 }
1440 brelse(c_bh) ;
1441 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
1442 "transaction start offset %lu, len %d id %d\n",
1443 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1444 le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_trans_id)) ;
1445 return 1 ;
1446 } else {
1447 return 0 ;
1448 }
1449 }
1450
1451 static void brelse_array(struct buffer_head **heads, int num) {
1452 int i ;
1453 for (i = 0 ; i < num ; i++) {
1454 brelse(heads[i]) ;
1455 }
1456 }
1457
1458 /*
1459 ** given the start, and values for the oldest acceptable transactions,
1460 ** this either reads in and replays a transaction, or returns because the transaction
1461 ** is invalid, or too old.
1462 */
1463 static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
1464 unsigned long oldest_trans_id, unsigned long newest_mount_id) {
1465 struct reiserfs_journal_desc *desc ;
1466 struct reiserfs_journal_commit *commit ;
1467 unsigned long trans_id = 0 ;
1468 struct buffer_head *c_bh ;
1469 struct buffer_head *d_bh ;
1470 struct buffer_head **log_blocks = NULL ;
1471 struct buffer_head **real_blocks = NULL ;
1472 unsigned long trans_offset ;
1473 int i;
1474
1475 d_bh = journal_bread(p_s_sb, cur_dblock) ;
1476 if (!d_bh)
1477 return 1 ;
1478 desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
1479 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
1480 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
1481 "journal_read_transaction, offset %lu, len %d mount_id %d\n",
1482 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1483 le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_mount_id)) ;
1484 if (le32_to_cpu(desc->j_trans_id) < oldest_trans_id) {
1485 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
1486 "journal_read_trans skipping because %lu is too old\n",
1487 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
1488 brelse(d_bh) ;
1489 return 1 ;
1490 }
1491 if (le32_to_cpu(desc->j_mount_id) != newest_mount_id) {
1492 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
1493 "journal_read_trans skipping because %d is != "
1494 "newest_mount_id %lu\n", le32_to_cpu(desc->j_mount_id),
1495 newest_mount_id) ;
1496 brelse(d_bh) ;
1497 return 1 ;
1498 }
1499 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1500 ((trans_offset + le32_to_cpu(desc->j_len) + 1) %
1501 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
1502 if (!c_bh) {
1503 brelse(d_bh) ;
1504 return 1 ;
1505 }
1506 commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
1507 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
1508 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
1509 "commit offset %ld had bad time %d or length %d\n",
1510 c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1511 le32_to_cpu(commit->j_trans_id), le32_to_cpu(commit->j_len));
1512 brelse(c_bh) ;
1513 brelse(d_bh) ;
1514 return 1;
1515 }
1516 trans_id = le32_to_cpu(desc->j_trans_id) ;
1517 /* now we know we've got a good transaction, and it was inside the valid time ranges */
1518 log_blocks = reiserfs_kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
1519 real_blocks = reiserfs_kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
1520 if (!log_blocks || !real_blocks) {
1521 brelse(c_bh) ;
1522 brelse(d_bh) ;
1523 reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1524 reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1525 reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS\n") ;
1526 return -1 ;
1527 }
1528 /* get all the buffer heads */
1529 for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
1530 log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
1531 if (i < JOURNAL_TRANS_HALF) {
1532 real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
1533 } else {
1534 real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
1535 }
1536 if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
1537 reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem\n");
1538 goto abort_replay;
1539 }
1540 /* make sure we don't try to replay onto log or reserved area */
1541 if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
1542 reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block\n") ;
1543 abort_replay:
1544 brelse_array(log_blocks, i) ;
1545 brelse_array(real_blocks, i) ;
1546 brelse(c_bh) ;
1547 brelse(d_bh) ;
1548 reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1549 reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1550 return -1 ;
1551 }
1552 }
1553 /* read in the log blocks, memcpy to the corresponding real block */
1554 ll_rw_block(READ, le32_to_cpu(desc->j_len), log_blocks) ;
1555 for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
1556 wait_on_buffer(log_blocks[i]) ;
1557 if (!buffer_uptodate(log_blocks[i])) {
1558 reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed\n") ;
1559 brelse_array(log_blocks + i, le32_to_cpu(desc->j_len) - i) ;
1560 brelse_array(real_blocks, le32_to_cpu(desc->j_len)) ;
1561 brelse(c_bh) ;
1562 brelse(d_bh) ;
1563 reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1564 reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1565 return -1 ;
1566 }
1567 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
1568 mark_buffer_uptodate(real_blocks[i], 1) ;
1569 brelse(log_blocks[i]) ;
1570 }
1571 /* flush out the real blocks */
1572 for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
1573 set_bit(BH_Dirty, &(real_blocks[i]->b_state)) ;
1574 ll_rw_block(WRITE, 1, real_blocks + i) ;
1575 }
1576 for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
1577 wait_on_buffer(real_blocks[i]) ;
1578 if (!buffer_uptodate(real_blocks[i])) {
1579 reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed\n") ;
1580 brelse_array(real_blocks + i, le32_to_cpu(desc->j_len) - i) ;
1581 brelse(c_bh) ;
1582 brelse(d_bh) ;
1583 reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1584 reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1585 return -1 ;
1586 }
1587 brelse(real_blocks[i]) ;
1588 }
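  /* a transaction occupies j_len + 2 log blocks in total: the desc block,
  ** j_len data blocks, and the commit block. So the next transaction (and the
  ** new journal start) begins at trans_offset + j_len + 2, modulo the on-disk
  ** journal size. With the same illustrative numbers as above, offset 8190
  ** and j_len == 3 wrap to (8190 + 3 + 2) % 8192 == 3.
  */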
1589 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
1590 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
1591 "start to offset %ld\n",
1592 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
1593
1594 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
1595 SB_JOURNAL(p_s_sb)->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
1596 SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
1597 SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
1598 brelse(c_bh) ;
1599 brelse(d_bh) ;
1600 reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1601 reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
1602 return 0 ;
1603 }
1604
1605 /*
1606 ** read and replay the log
1607 ** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
1608 ** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
1609 **
1610 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
1611 **
1612 ** On exit, it sets things up so the first transaction will work correctly.
1613 */
1614 struct buffer_head * reiserfs_breada (kdev_t dev, int block, int bufsize,
1615 unsigned int max_block)
1616 {
1617 struct buffer_head * bhlist[BUFNR];
1618 unsigned int blocks = BUFNR;
1619 struct buffer_head * bh;
1620 int i, j;
1621
1622 bh = getblk (dev, block, bufsize);
1623 if (buffer_uptodate (bh))
1624 return (bh);
1625
1626 if (block + BUFNR > max_block) {
1627 blocks = max_block - block;
1628 }
1629 bhlist[0] = bh;
1630 j = 1;
1631 for (i = 1; i < blocks; i++) {
1632 bh = getblk (dev, block + i, bufsize);
1633 if (buffer_uptodate (bh)) {
1634 brelse (bh);
1635 break;
1636 }
1637 else bhlist[j++] = bh;
1638 }
1639 ll_rw_block (READ, j, bhlist);
1640 for(i = 1; i < j; i++)
1641 brelse (bhlist[i]);
1642 bh = bhlist[0];
1643 wait_on_buffer (bh);
1644 if (buffer_uptodate (bh))
1645 return bh;
1646 brelse (bh);
1647 return NULL;
1648 }
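/* reiserfs_breada returns the requested block, and batches a read for up to
** BUFNR - 1 following blocks into the same ll_rw_block call. The batch is cut
** short at max_block, or at the first buffer that is already uptodate, so on
** a warm cache this degenerates into a plain getblk. journal_breada below
** just clamps max_block to the end of the on-disk journal.
*/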
1649
1650 static struct buffer_head * journal_breada (struct super_block *p_s_sb, int block)
1651 {
1652 return reiserfs_breada (SB_JOURNAL_DEV(p_s_sb), block, p_s_sb->s_blocksize,
1653 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
1654 }
1655
1656 static int journal_read(struct super_block *p_s_sb) {
1657 struct reiserfs_journal_desc *desc ;
1658 unsigned long oldest_trans_id = 0;
1659 unsigned long oldest_invalid_trans_id = 0 ;
1660 time_t start ;
1661 unsigned long oldest_start = 0;
1662 unsigned long cur_dblock = 0 ;
1663 unsigned long newest_mount_id = 9 ;
1664 struct buffer_head *d_bh ;
1665 struct reiserfs_journal_header *jh ;
1666 int valid_journal_header = 0 ;
1667 int replay_count = 0 ;
1668 int continue_replay = 1 ;
1669 int ret ;
1670
1671 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
1672 printk("reiserfs: checking transaction log (device %s) ...\n",
1673 bdevname(SB_JOURNAL_DEV(p_s_sb))) ;
1674 printk("for (%s)\n",
1675 bdevname(p_s_sb->s_dev)) ;
1676
1677 start = CURRENT_TIME ;
1678
1679 /* step 1, read in the journal header block. Check the transaction it says
1680 ** is the first unflushed, and if that transaction is not valid,
1681 ** replay is done
1682 */
1683 SB_JOURNAL(p_s_sb)->j_header_bh = journal_bread(p_s_sb,
1684 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1685 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
1686 if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
1687 return 1 ;
1688 }
1689 jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
1690 if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
1691 le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
1692 le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
1693 oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1694 le32_to_cpu(jh->j_first_unflushed_offset) ;
1695 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
1696 newest_mount_id = le32_to_cpu(jh->j_mount_id);
1697 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
1698 "header: first_unflushed_offset %d, last_flushed_trans_id "
1699 "%lu\n", le32_to_cpu(jh->j_first_unflushed_offset),
1700 le32_to_cpu(jh->j_last_flush_trans_id)) ;
1701 valid_journal_header = 1 ;
1702
1703 /* now, we try to read the first unflushed offset. If it is not valid,
1704 ** there is nothing more we can do, and it makes no sense to read
1705 ** through the whole log.
1706 */
1707 d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
1708 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
1709 if (!ret) {
1710 continue_replay = 0 ;
1711 }
1712 brelse(d_bh) ;
1713 goto start_log_replay;
1714 }
1715
1716 if (continue_replay && is_read_only(p_s_sb->s_dev)) {
1717 reiserfs_warning(p_s_sb, "clm-2076: device is readonly, unable to replay log\n") ;
1718 return -1 ;
1719 }
1720 if (continue_replay && (p_s_sb->s_flags & MS_RDONLY)) {
1721 printk("Warning, log replay starting on readonly filesystem\n") ;
1722 }
1723
1724 /* ok, there are transactions that need to be replayed. start with the first log block, find
1725 ** all the valid transactions, and pick out the oldest.
1726 */
1727 while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
1728 d_bh = journal_breada(p_s_sb, cur_dblock) ;
1729 ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
1730 if (ret == 1) {
1731 desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
1732 if (oldest_start == 0) { /* init all oldest_ values */
1733 oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
1734 oldest_start = d_bh->b_blocknr ;
1735 newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
1736 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
1737 "oldest_start to offset %lu, trans_id %lu\n",
1738 oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1739 oldest_trans_id) ;
1740 } else if (oldest_trans_id > le32_to_cpu(desc->j_trans_id)) {
1741 /* one we just read was older */
1742 oldest_trans_id = le32_to_cpu(desc->j_trans_id) ;
1743 oldest_start = d_bh->b_blocknr ;
1744 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
1745 "oldest_start to offset %lu, trans_id %lu\n",
1746 oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1747 oldest_trans_id) ;
1748 }
1749 if (newest_mount_id < le32_to_cpu(desc->j_mount_id)) {
1750 newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
1751 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
1752 "newest_mount_id to %d\n", le32_to_cpu(desc->j_mount_id));
1753 }
1754 cur_dblock += le32_to_cpu(desc->j_len) + 2 ;
1755 } else {
1756 cur_dblock++ ;
1757 }
1758 brelse(d_bh) ;
1759 }
1760
1761 start_log_replay:
1762 cur_dblock = oldest_start ;
1763 if (oldest_trans_id) {
1764 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
1765 "from offset %lu, trans_id %lu\n",
1766 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1767 oldest_trans_id) ;
1768
1769 }
1770 replay_count = 0 ;
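  /* replay loop: each successful journal_read_transaction call replays one
  ** transaction and advances j_start past it, so cur_dblock walks forward
  ** through the log. We stop on an error (ret < 0), on the first invalid or
  ** too-old transaction (ret > 0), or once we wrap back around to
  ** oldest_start.
  */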
1771 while(continue_replay && oldest_trans_id > 0) {
1772 ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
1773 if (ret < 0) {
1774 return ret ;
1775 } else if (ret != 0) {
1776 break ;
1777 }
1778 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
1779 replay_count++ ;
1780 if (cur_dblock == oldest_start)
1781 break;
1782 }
1783
1784 if (oldest_trans_id == 0) {
1785 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
1786 "transactions found\n") ;
1787 }
1788 /* j_start does not get set correctly if we don't replay any transactions.
1789 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
1790 ** copy the trans_id from the header
1791 */
1792 if (valid_journal_header && replay_count == 0) {
1793 SB_JOURNAL(p_s_sb)->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
1794 SB_JOURNAL(p_s_sb)->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
1795 SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
1796 SB_JOURNAL(p_s_sb)->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
1797 } else {
1798 SB_JOURNAL(p_s_sb)->j_mount_id = newest_mount_id + 1 ;
1799 }
1800 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
1801 "newest_mount_id to %lu\n", SB_JOURNAL(p_s_sb)->j_mount_id) ;
1802 SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = SB_JOURNAL(p_s_sb)->j_start ;
1803 if (replay_count > 0) {
1804 printk("reiserfs: replayed %d transactions in %lu seconds\n", replay_count,
1805 CURRENT_TIME - start) ;
1806 }
1807 if (!is_read_only(p_s_sb->s_dev) &&
1808 _update_journal_header_block(p_s_sb, SB_JOURNAL(p_s_sb)->j_start,
1809 SB_JOURNAL(p_s_sb)->j_last_flush_trans_id))
1810 {
1811 /* replay failed, caller must call free_journal_ram and abort
1812 ** the mount
1813 */
1814 return -1 ;
1815 }
1816 return 0 ;
1817 }
1818
1819
1820 struct reiserfs_journal_commit_task {
1821 struct super_block *p_s_sb ;
1822 int jindex ;
1823 int wake_on_finish ; /* if this is one, we wake the task_done queue; if it
1824 ** is zero, we free the whole struct on finish
1825 */
1826 struct reiserfs_journal_commit_task *self ;
1827 struct wait_queue *task_done ;
1828 struct tq_struct task ;
1829 } ;
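/* lifecycle of a commit task: commit_flush_async below kmallocs one of these,
** setup_commit_task_arg fills it in (ct->self points back at the allocation
** so the handler can free it), and queue_task puts it on
** reiserfs_commit_thread_tq. When the commit thread runs the queue,
** reiserfs_journal_commit_task_func flushes the commit list and frees the
** struct.
*/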
1830
1831 static void reiserfs_journal_commit_task_func(struct reiserfs_journal_commit_task *ct) {
1832
1833 struct reiserfs_journal_list *jl ;
1834 jl = SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex ;
1835
1836 flush_commit_list(ct->p_s_sb, SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex, 1) ;
1837
1838 if (jl->j_len > 0 && atomic_read(&(jl->j_nonzerolen)) > 0 &&
1839 atomic_read(&(jl->j_commit_left)) == 0) {
1840 kupdate_one_transaction(ct->p_s_sb, jl) ;
1841 }
1842 reiserfs_kfree(ct->self, sizeof(struct reiserfs_journal_commit_task), ct->p_s_sb) ;
1843 }
1844
1845 static void setup_commit_task_arg(struct reiserfs_journal_commit_task *ct,
1846 struct super_block *p_s_sb,
1847 int jindex) {
1848 if (!ct) {
1849 reiserfs_panic(NULL, "journal-1360: setup_commit_task_arg called with NULL struct\n") ;
1850 }
1851 ct->p_s_sb = p_s_sb ;
1852 ct->jindex = jindex ;
1853 ct->task_done = NULL ;
1854 INIT_LIST_HEAD(&ct->task.list) ;
1855 ct->task.sync = 0 ;
1856 ct->task.routine = (void *)(void *)reiserfs_journal_commit_task_func ;
1857 ct->self = ct ;
1858 ct->task.data = (void *)ct ;
1859 }
1860
1861 static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
1862 struct reiserfs_journal_commit_task *ct ;
1863 /* using GFP_NOFS, GFP_KERNEL could try to flush inodes, which will try
1864 ** to start/join a transaction, which will deadlock
1865 */
1866 ct = reiserfs_kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_NOFS, p_s_sb) ;
1867 if (ct) {
1868 setup_commit_task_arg(ct, p_s_sb, jindex) ;
1869 queue_task(&(ct->task), &reiserfs_commit_thread_tq);
1870 wake_up(&reiserfs_commit_thread_wait) ;
1871 } else {
1872 #ifdef CONFIG_REISERFS_CHECK
1873 reiserfs_warning(p_s_sb, "journal-1540: kmalloc failed, doing sync commit\n") ;
1874 #endif
1875 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
1876 }
1877 }
1878
1879 /*
1880 ** this is the commit thread. It is started with kernel_thread on
1881 ** FS mount, and journal_release() waits for it to exit.
1882 **
1883 ** It could do a periodic commit, but there is a lot of code for that
1884 ** elsewhere right now, and I only wanted to implement this little
1885 ** piece for starters.
1886 **
1887 ** All we do here is sleep on the j_commit_thread_wait wait queue, and
1888 ** then run the per filesystem commit task queue when we wake up.
1889 */
1890 static int reiserfs_journal_commit_thread(void *nullp) {
1891
1892 daemonize() ;
1893
1894 spin_lock_irq(&current->sigmask_lock);
1895 sigfillset(&current->blocked);
1896 recalc_sigpending(current);
1897 spin_unlock_irq(&current->sigmask_lock);
1898
1899 sprintf(current->comm, "kreiserfsd") ;
1900 lock_kernel() ;
1901 while(1) {
1902
1903 while(TQ_ACTIVE(reiserfs_commit_thread_tq)) {
1904 run_task_queue(&reiserfs_commit_thread_tq) ;
1905 }
1906
1907 /* if there aren't any more filesystems left, break */
1908 if (reiserfs_mounted_fs_count <= 0) {
1909 run_task_queue(&reiserfs_commit_thread_tq) ;
1910 break ;
1911 }
1912 wake_up(&reiserfs_commit_thread_done) ;
1913 interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
1914 }
1915 unlock_kernel() ;
1916 wake_up(&reiserfs_commit_thread_done) ;
1917 return 0 ;
1918 }
1919
1920 static void journal_list_init(struct super_block *p_s_sb) {
1921 int i ;
1922 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
1923 init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_commit_wait)) ;
1924 init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_flush_wait)) ;
1925 }
1926 }
1927
1928 static int release_journal_dev( struct super_block *super,
1929 struct reiserfs_journal *journal )
1930 {
1931 int result;
1932
1933 result = 0;
1934
1935 if( journal -> j_dev_bd != NULL && journal->j_dev_bd != super->s_bdev) {
1936 result = blkdev_put( journal -> j_dev_bd, BDEV_FS );
1937 journal -> j_dev_bd = NULL;
1938 }
1939 if( journal -> j_dev_file != NULL ) {
1940 result = filp_close( journal -> j_dev_file, NULL );
1941 journal -> j_dev_file = NULL;
1942 }
1943 if( result != 0 ) {
1944 reiserfs_warning(super, "release_journal_dev: Cannot release journal device: %i", result );
1945 }
1946 return result;
1947 }
1948
1949 static int journal_init_dev( struct super_block *super,
1950 struct reiserfs_journal *journal,
1951 const char *jdev_name )
1952 {
1953 int result;
1954 kdev_t jdev;
1955 int blkdev_mode = FMODE_READ | FMODE_WRITE;
1956
1957 result = 0;
1958
1959 journal -> j_dev_bd = NULL;
1960 journal -> j_dev_file = NULL;
1961 jdev = SB_JOURNAL_DEV( super ) =
1962 SB_ONDISK_JOURNAL_DEVICE( super ) ?
1963 to_kdev_t(SB_ONDISK_JOURNAL_DEVICE( super )) : super -> s_dev;
1964
1965 /* there is no "jdev" option */
1966
1967 if (is_read_only(super->s_dev))
1968 blkdev_mode = FMODE_READ;
1969
1970 if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
1971
1972 /* don't add an extra reference to the device when
1973 * the log is on the same disk as the FS. It makes the
1974 * raid code unhappy
1975 */
1976 if (jdev == super->s_dev) {
1977 journal->j_dev_bd = super->s_bdev;
1978 return 0;
1979 }
1980 journal -> j_dev_bd = bdget( kdev_t_to_nr( jdev ) );
1981 if( journal -> j_dev_bd ) {
1982 result = blkdev_get( journal -> j_dev_bd,
1983 blkdev_mode, 0, BDEV_FS );
1984 if (result) {
1985 bdput(journal->j_dev_bd);
1986 journal->j_dev_bd = NULL;
1987 }
1988 } else {
1989 result = -ENOMEM;
1990 }
1991 if( result != 0 )
1992 printk( "journal_init_dev: cannot init journal device\n '%s': %i",
1993 kdevname( jdev ), result );
1994
1995 return result;
1996 }
1997
1998 /* "jdev" option has been found */
1999
2000 journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
2001 if( !IS_ERR( journal -> j_dev_file ) ) {
2002 struct inode *jdev_inode;
2003
2004 jdev_inode = journal -> j_dev_file -> f_dentry -> d_inode;
2005 journal -> j_dev_bd = jdev_inode -> i_bdev;
2006 if( !S_ISBLK( jdev_inode -> i_mode ) ) {
2007 printk( "journal_init_dev: '%s' is not a block device", jdev_name );
2008 result = -ENOTBLK;
2009 } else if( journal -> j_dev_file -> f_vfsmnt -> mnt_flags & MNT_NODEV) {
2010 printk( "journal_init_dev: Cannot use devices on '%s'", jdev_name );
2011 result = -EACCES;
2012 } else if( jdev_inode -> i_bdev == NULL ) {
2013 printk( "journal_init_dev: bdev uninitialized for '%s'", jdev_name );
2014 result = -ENOMEM;
2015 } else if( ( result = blkdev_get( jdev_inode -> i_bdev,
2016 blkdev_mode,
2017 0, BDEV_FS ) ) != 0 ) {
2018 journal -> j_dev_bd = NULL;
2019 printk( "journal_init_dev: Cannot load device '%s': %i", jdev_name,
2020 result );
2021 } else
2022 /* ok */
2023 SB_JOURNAL_DEV( super ) =
2024 to_kdev_t( jdev_inode -> i_bdev -> bd_dev );
2025 } else {
2026 result = PTR_ERR( journal -> j_dev_file );
2027 journal -> j_dev_file = NULL;
2028 printk( "journal_init_dev: Cannot open '%s': %i", jdev_name, result );
2029 }
2030 if( result != 0 ) {
2031 release_journal_dev( super, journal );
2032 }
2033 printk( "journal_init_dev: journal device: %s", kdevname( SB_JOURNAL_DEV( super ) ) );
2034 return result;
2035 }
2036
2037 /*
2038 ** must be called once on fs mount. calls journal_read for you
2039 */
2040 int journal_init(struct super_block *p_s_sb, const char * j_dev_name,
2041 int old_format) {
2042 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
2043 struct buffer_head *bhjh;
2044 struct reiserfs_super_block * rs;
2045 struct reiserfs_journal_header *jh;
2046 struct reiserfs_journal *journal;
2047
2048 if (sizeof(struct reiserfs_journal_commit) != 4096 ||
2049 sizeof(struct reiserfs_journal_desc) != 4096) {
2050 reiserfs_warning(p_s_sb, "journal-1249: commit or desc struct not 4096 %Zd %Zd\n",
2051 sizeof(struct reiserfs_journal_commit),
2052 sizeof(struct reiserfs_journal_desc)) ;
2053 return 1 ;
2054 }
2055
2056 if ( SB_ONDISK_JOURNAL_SIZE(p_s_sb) < 512 ) {
2057 reiserfs_warning(p_s_sb, "Journal size %d is less than 512+1 blocks, which is unsupported\n", SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2058 return 1 ;
2059 }
2060
2061 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
2062 if (!journal) {
2063 reiserfs_warning(p_s_sb, "journal-1256: unable to get memory for journal structure\n") ;
2064 return 1 ;
2065 }
2066 memset(journal, 0, sizeof(struct reiserfs_journal)) ;
2067 INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
2068 INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);
2069
2070 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2071 SB_JOURNAL(p_s_sb)->j_list_bitmap,
2072 SB_BMAP_NR(p_s_sb)))
2073 goto free_and_return ;
2074
2075 allocate_bitmap_nodes(p_s_sb) ;
2076
2077 /* reserved for journal area support */
2078 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2079 REISERFS_OLD_DISK_OFFSET_IN_BYTES /
2080 p_s_sb->s_blocksize +
2081 SB_BMAP_NR(p_s_sb) + 1 :
2082 REISERFS_DISK_OFFSET_IN_BYTES /
2083 p_s_sb->s_blocksize + 2);
2084
2085 if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
2086 reiserfs_warning(p_s_sb, "journal-1259: unable to initialize journal device\n");
2087 goto free_and_return;
2088 }
2089
2090 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2091
2092 /* read journal header */
2093 bhjh = journal_bread (p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2094 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2095 if (!bhjh) {
2096 reiserfs_warning(p_s_sb, "journal-459: unable to read journal header\n") ;
2097 goto free_and_return;
2098 }
2099 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2100
2101 /* make sure that journal matches to the super block */
2102 if (is_reiserfs_jr(rs) &&
2103 jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs)) {
2104 char jname[ 32 ];
2105 char fname[ 32 ];
2106
2107 strcpy( jname, kdevname( SB_JOURNAL_DEV(p_s_sb) ) );
2108 strcpy( fname, kdevname( p_s_sb->s_dev ) );
2109 printk("journal-460: journal header magic %x (device %s) does not "
2110 "match magic found in super block %x (device %s)\n",
2111 jh->jh_journal.jp_journal_magic, jname,
2112 sb_jp_journal_magic(rs), fname);
2113 brelse (bhjh);
2114 goto free_and_return;
2115 }
2116
2117 SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
2118 SB_JOURNAL_MAX_BATCH(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
2119 SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
2120 SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
2121
2122 if (SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2123 /* make sure these parameters are available, assign if they are not */
2124 __u32 initial = SB_JOURNAL_TRANS_MAX(p_s_sb);
2125 __u32 ratio = 1;
2126
2127 if (p_s_sb->s_blocksize < 4096)
2128 ratio = 4096 / p_s_sb->s_blocksize;
2129
2130 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/SB_JOURNAL_TRANS_MAX(p_s_sb) <
2131 JOURNAL_MIN_RATIO)
2132 {
2133 SB_JOURNAL_TRANS_MAX(p_s_sb) = SB_ONDISK_JOURNAL_SIZE(p_s_sb) /
2134 JOURNAL_MIN_RATIO;
2135 }
2136 if (SB_JOURNAL_TRANS_MAX(p_s_sb) > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2137 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT / ratio;
2138 if (SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2139 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MIN_DEFAULT / ratio;
2140
2141 if (SB_JOURNAL_TRANS_MAX(p_s_sb) != initial) {
2142 printk ("reiserfs warning: wrong transaction max size (%u). "
2143 "Changed to %u\n", initial, SB_JOURNAL_TRANS_MAX(p_s_sb));
2144 }
2145 SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb) *
2146 JOURNAL_MAX_BATCH_DEFAULT /
2147 JOURNAL_TRANS_MAX_DEFAULT;
2148 }
2149
2150 if (!SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2151 /* the filesystem was created by an old version of mkreiserfs,
2152 so this field contains a zero value */
2153 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT ;
2154 SB_JOURNAL_MAX_BATCH(p_s_sb) = JOURNAL_MAX_BATCH_DEFAULT ;
2155 SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = JOURNAL_MAX_COMMIT_AGE ;
2156
2157 /* for blocksize >= 4096 - max transaction size is 1024. For
2158 block size < 4096 trans max size is decreased proportionally */
2159 if (p_s_sb->s_blocksize < 4096) {
2160 SB_JOURNAL_TRANS_MAX(p_s_sb) /= (4096 / p_s_sb->s_blocksize) ;
2161 SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb)*9 / 10;
2162 }
2163 }
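  /* worked example of the scaling above: a 1k blocksize gives ratio == 4, so
  ** the 4k-blocksize transaction ceiling of 1024 drops to 256, and max_batch
  ** is scaled down by the same proportion.
  */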
2164
2165 brelse (bhjh);
2166
2167 SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
2168 SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
2169
2170 /* clear out the journal list array */
2171 memset(SB_JOURNAL_LIST(p_s_sb), 0,
2172 sizeof(struct reiserfs_journal_list) * JOURNAL_LIST_COUNT) ;
2173
2174 journal_list_init(p_s_sb) ;
2175
2176 memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0,
2177 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
2178 memset(journal_writers, 0, sizeof(char *) * 512) ; /* debug code */
2179
2180 INIT_LIST_HEAD(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
2181
2182 SB_JOURNAL(p_s_sb)->j_start = 0 ;
2183 SB_JOURNAL(p_s_sb)->j_len = 0 ;
2184 SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
2185 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
2186 SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
2187 SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
2188 SB_JOURNAL(p_s_sb)->j_last = NULL ;
2189 SB_JOURNAL(p_s_sb)->j_first = NULL ;
2190 init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2191 init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
2192
2193 SB_JOURNAL(p_s_sb)->j_trans_id = 10 ;
2194 SB_JOURNAL(p_s_sb)->j_mount_id = 10 ;
2195 SB_JOURNAL(p_s_sb)->j_state = 0 ;
2196 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
2197 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 0) ;
2198 SB_JOURNAL(p_s_sb)->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
2199 SB_JOURNAL(p_s_sb)->j_cnode_free_orig = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
2200 SB_JOURNAL(p_s_sb)->j_cnode_free = SB_JOURNAL(p_s_sb)->j_cnode_free_list ?
2201 num_cnodes : 0 ;
2202 SB_JOURNAL(p_s_sb)->j_cnode_used = 0 ;
2203 SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
2204 init_journal_hash(p_s_sb) ;
2205 SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb)) ;
2206 if (!(SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap)) {
2207 reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0\n") ;
2208 goto free_and_return;
2209 }
2210 if (journal_read(p_s_sb) < 0) {
2211 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount\n") ;
2212 goto free_and_return;
2213 }
2214 /* once the read is done, we can set this where it belongs */
2215 SB_JOURNAL_LIST_INDEX(p_s_sb) = 0 ;
2216
2217 if (reiserfs_dont_log (p_s_sb))
2218 return 0;
2219
2220 reiserfs_mounted_fs_count++ ;
2221 if (reiserfs_mounted_fs_count <= 1) {
2222 kernel_thread((void *)(void *)reiserfs_journal_commit_thread, NULL,
2223 CLONE_FS | CLONE_FILES | CLONE_VM) ;
2224 }
2225 return 0 ;
2226
2227 free_and_return:
2228 free_journal_ram(p_s_sb);
2229 return 1;
2230 }
2231
2232 /*
2233 ** test for a polite end of the current transaction. Used by file_write, and should
2234 ** be used by delete to make sure they don't write more than can fit inside a single
2235 ** transaction
2236 */
2237 int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
2238 time_t now = CURRENT_TIME ;
2239 if (reiserfs_dont_log(th->t_super))
2240 return 0 ;
2241 if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
2242 (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= SB_JOURNAL_MAX_BATCH(th->t_super) ||
2243 atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
2244 (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(th->t_super) ||
2245 SB_JOURNAL(th->t_super)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(th->t_super) * 3)) {
2246 return 1 ;
2247 }
2248 return 0 ;
2249 }
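/* a typical caller pattern (sketch only, using this file's parameter names):
** long running writers poll this and politely restart their transaction when
** it returns 1, e.g.
**
**   if (journal_transaction_should_end(th, new_alloc)) {
**     journal_end(th, sb, nblocks) ;
**     journal_begin(th, sb, nblocks) ;
**   }
**
** so a single handle never tries to log more than fits in one transaction.
*/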
2250
2251 /* this must be called inside a transaction, and requires the
2252 ** kernel_lock to be held
2253 */
2254 void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
2255 struct super_block *s = th->t_super ;
2256 SB_JOURNAL(s)->j_must_wait = 1 ;
2257 set_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
2258 return ;
2259 }
2260
2261 /* this must be called without a transaction started, and does not
2262 ** require BKL
2263 */
2264 void reiserfs_allow_writes(struct super_block *s) {
2265 clear_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
2266 wake_up(&SB_JOURNAL(s)->j_join_wait) ;
2267 }
2268
2269 /* this must be called without a transaction started, and does not
2270 ** require BKL
2271 */
2272 void reiserfs_wait_on_write_block(struct super_block *s) {
2273 wait_event(SB_JOURNAL(s)->j_join_wait,
2274 !test_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state)) ;
2275 }
2276
2277 /* join == true if you must join an existing transaction.
2278 ** join == false if you can deal with waiting for others to finish
2279 **
2280 ** this will block until the transaction is joinable. send the number of blocks you
2281 ** expect to use in nblocks.
2282 */
2283 static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
2284 time_t now = CURRENT_TIME ;
2285 int old_trans_id ;
2286
2287 reiserfs_check_lock_depth("journal_begin") ;
2288 RFALSE( p_s_sb->s_flags & MS_RDONLY,
2289 "clm-2078: calling journal_begin on readonly FS") ;
2290
2291 if (reiserfs_dont_log(p_s_sb)) {
2292 th->t_super = p_s_sb ; /* others will check this for the don't log flag */
2293 return 0 ;
2294 }
2295 PROC_INFO_INC( p_s_sb, journal.journal_being );
2296
2297 relock:
2298 lock_journal(p_s_sb) ;
2299
2300 if (test_bit(WRITERS_BLOCKED, &SB_JOURNAL(p_s_sb)->j_state)) {
2301 unlock_journal(p_s_sb) ;
2302 reiserfs_wait_on_write_block(p_s_sb) ;
2303 PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
2304 goto relock ;
2305 }
2306
2307 /* if there is no room in the journal OR
2308 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning.
2309 ** we don't sleep if there aren't other writers
2310 */
2311
2312 if ( (!join && SB_JOURNAL(p_s_sb)->j_must_wait > 0) ||
2313 ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= SB_JOURNAL_MAX_BATCH(p_s_sb)) ||
2314 (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0 && SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
2315 (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) ||
2316 (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) ) ||
2317 (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3))) {
2318
2319 unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
2320
2321 /* if writer count is 0, we can just force this transaction to end, and start
2322 ** a new one afterwards.
2323 */
2324 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
2325 struct reiserfs_transaction_handle myth ;
2326 journal_join(&myth, p_s_sb, 1) ;
2327 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2328 journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2329 do_journal_end(&myth, p_s_sb,1,COMMIT_NOW) ;
2330 } else {
2331 /* but if the writer count isn't zero, we have to wait for the current writers to finish.
2332 ** They won't batch on transaction end once we set j_jlock
2333 */
2334 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
2335 old_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
2336 while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
2337 SB_JOURNAL(p_s_sb)->j_trans_id == old_trans_id) {
2338 sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2339 }
2340 }
2341 PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
2342 goto relock ;
2343 }
2344
2345 if (SB_JOURNAL(p_s_sb)->j_trans_start_time == 0) { /* we are the first writer, set trans_id */
2346 SB_JOURNAL(p_s_sb)->j_trans_start_time = now ;
2347 }
2348 atomic_inc(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2349 SB_JOURNAL(p_s_sb)->j_len_alloc += nblocks ;
2350 th->t_blocks_logged = 0 ;
2351 th->t_blocks_allocated = nblocks ;
2352 th->t_super = p_s_sb ;
2353 th->t_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
2354 th->t_caller = "Unknown" ;
2355 unlock_journal(p_s_sb) ;
2356 p_s_sb->s_dirt = 1;
2357 return 0 ;
2358 }
2359
2360
2361 static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2362 return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
2363 }
2364
2365 int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
2366 return do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
2367 }
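/* the usual begin/prepare/log/end sequence, as used elsewhere in this file
** (a sketch, not a complete caller):
**
**   struct reiserfs_transaction_handle th ;
**   journal_begin(&th, p_s_sb, nblocks) ;
**   reiserfs_prepare_for_journal(p_s_sb, bh, 1) ;
**   ... modify bh ...
**   journal_mark_dirty(&th, p_s_sb, bh) ;
**   journal_end(&th, p_s_sb, nblocks) ;
*/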
2368
2369 /* not used at all */
2370 int journal_prepare(struct super_block * p_s_sb, struct buffer_head *bh) {
2371 return 0 ;
2372 }
2373
2374 /*
2375 ** puts bh into the current transaction. If it was already there, it removes the
2376 ** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
2377 **
2378 ** if it was dirty, this cleans it and files it onto the clean list. I can't let it be dirty again until the
2379 ** transaction is committed.
2380 **
2381 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
2382 */
2383 int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
2384 struct reiserfs_journal_cnode *cn = NULL;
2385 int count_already_incd = 0 ;
2386 int prepared = 0 ;
2387
2388 PROC_INFO_INC( p_s_sb, journal.mark_dirty );
2389 if (reiserfs_dont_log(th->t_super)) {
2390 mark_buffer_dirty(bh) ;
2391 return 0 ;
2392 }
2393
2394 if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
2395 reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
2396 th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
2397 }
2398 p_s_sb->s_dirt = 1 ;
2399
2400 prepared = test_and_clear_bit(BH_JPrepared, &bh->b_state) ;
2401 /* already in this transaction, we are done */
2402 if (buffer_journaled(bh)) {
2403 PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
2404 return 0 ;
2405 }
2406
2407 /* this must be turned into a panic instead of a warning. We can't allow
2408 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
2409 ** could get to disk too early. NOT GOOD.
2410 */
2411 if (!prepared || buffer_locked(bh)) {
2412 reiserfs_warning(p_s_sb, "journal-1777: buffer %lu bad state %cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT\n", bh->b_blocknr, prepared ? ' ' : '!',
2413 buffer_locked(bh) ? ' ' : '!',
2414 buffer_dirty(bh) ? ' ' : '!',
2415 buffer_journal_dirty(bh) ? ' ' : '!') ;
2416 show_reiserfs_locks() ;
2417 }
2418 count_already_incd = clear_prepared_bits(bh) ;
2419
2420 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
2421 reiserfs_warning(p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d\n", atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount))) ;
2422 return 1 ;
2423 }
2424 /* this error means I've screwed up, and we've overflowed the transaction.
2425 ** Nothing can be done here, except make the FS readonly or panic.
2426 */
2427 if (SB_JOURNAL(p_s_sb)->j_len >= SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2428 reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
2429 }
2430
2431 if (buffer_journal_dirty(bh)) {
2432 count_already_incd = 1 ;
2433 PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
2434 mark_buffer_notjournal_dirty(bh) ;
2435 }
2436
2437 if (buffer_dirty(bh)) {
2438 clear_bit(BH_Dirty, &bh->b_state) ;
2439 }
2440
2441 if (buffer_journaled(bh)) { /* must double check after getting lock */
2442 goto done ;
2443 }
2444
2445 if (SB_JOURNAL(p_s_sb)->j_len > SB_JOURNAL(p_s_sb)->j_len_alloc) {
2446 SB_JOURNAL(p_s_sb)->j_len_alloc = SB_JOURNAL(p_s_sb)->j_len + JOURNAL_PER_BALANCE_CNT ;
2447 }
2448
2449 set_bit(BH_JDirty, &bh->b_state) ;
2450
2451 /* now put this guy on the end */
2452 if (!cn) {
2453 cn = get_cnode(p_s_sb) ;
2454 if (!cn) {
2455 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
2456 }
2457
2458 if (th->t_blocks_logged == th->t_blocks_allocated) {
2459 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
2460 SB_JOURNAL(p_s_sb)->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
2461 }
2462 th->t_blocks_logged++ ;
2463 SB_JOURNAL(p_s_sb)->j_len++ ;
2464
2465 cn->bh = bh ;
2466 cn->blocknr = bh->b_blocknr ;
2467 cn->dev = bh->b_dev ;
2468 cn->jlist = NULL ;
2469 insert_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, cn) ;
2470 if (!count_already_incd) {
2471 get_bh(bh) ;
2472 }
2473 }
2474 cn->next = NULL ;
2475 cn->prev = SB_JOURNAL(p_s_sb)->j_last ;
2476 cn->bh = bh ;
2477 if (SB_JOURNAL(p_s_sb)->j_last) {
2478 SB_JOURNAL(p_s_sb)->j_last->next = cn ;
2479 SB_JOURNAL(p_s_sb)->j_last = cn ;
2480 } else {
2481 SB_JOURNAL(p_s_sb)->j_first = cn ;
2482 SB_JOURNAL(p_s_sb)->j_last = cn ;
2483 }
2484 done:
2485 return 0 ;
2486 }
2487
2488 /*
2489 ** if buffer already in current transaction, do a journal_mark_dirty
2490 ** otherwise, just mark it dirty and move on. Used for writes to meta blocks
2491 ** that don't need journaling
2492 */
2493 int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
2494 if (reiserfs_dont_log(th->t_super) || buffer_journaled(bh) ||
2495 buffer_journal_dirty(bh)) {
2496 return journal_mark_dirty(th, p_s_sb, bh) ;
2497 }
2498 if (get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, bh->b_dev,bh->b_blocknr,bh->b_size)) {
2499 return journal_mark_dirty(th, p_s_sb, bh) ;
2500 }
2501 mark_buffer_dirty(bh) ;
2502 return 0 ;
2503 }
2504
2505 int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2506 return do_journal_end(th, p_s_sb, nblocks, 0) ;
2507 }
2508
2509 /* removes from the current transaction, releasing and decrementing any counters.
2510 ** also files the removed buffer directly onto the clean list
2511 **
2512 ** called by journal_mark_freed when a block has been deleted
2513 **
2514 ** returns 1 if it cleaned and released the buffer. 0 otherwise
2515 */
2516 static int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) {
2517 struct buffer_head *bh ;
2518 struct reiserfs_journal_cnode *cn ;
2519 int ret = 0;
2520
2521 cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
2522 if (!cn || !cn->bh) {
2523 return ret ;
2524 }
2525 bh = cn->bh ;
2526 if (cn->prev) {
2527 cn->prev->next = cn->next ;
2528 }
2529 if (cn->next) {
2530 cn->next->prev = cn->prev ;
2531 }
2532 if (cn == SB_JOURNAL(p_s_sb)->j_first) {
2533 SB_JOURNAL(p_s_sb)->j_first = cn->next ;
2534 }
2535 if (cn == SB_JOURNAL(p_s_sb)->j_last) {
2536 SB_JOURNAL(p_s_sb)->j_last = cn->prev ;
2537 }
2538 remove_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, NULL, bh, 0) ;
2539 mark_buffer_not_journaled(bh) ; /* don't log this one */
2540
2541 if (!already_cleaned) {
2542 mark_buffer_notjournal_dirty(bh) ;
2543 put_bh(bh) ;
2544 if (atomic_read(&(bh->b_count)) < 0) {
2545 reiserfs_warning(p_s_sb, "journal-1752: remove from trans, b_count < 0\n") ;
2546 }
2547 if (!buffer_locked(bh)) reiserfs_clean_and_file_buffer(bh) ;
2548 ret = 1 ;
2549 }
2550 SB_JOURNAL(p_s_sb)->j_len-- ;
2551 SB_JOURNAL(p_s_sb)->j_len_alloc-- ;
2552 free_cnode(p_s_sb, cn) ;
2553 return ret ;
2554 }
2555
2556 /* removes from a specific journal list hash */
2557 static int remove_from_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, struct buffer_head *bh, int remove_freed) {
2558 remove_journal_hash(SB_JOURNAL(s)->j_list_hash_table, jl, bh, remove_freed) ;
2559 return 0 ;
2560 }
2561
2562 /*
2563 ** for any cnode in a journal list, it can only be dirtied if all the
2564 ** transactions that include it are committed to disk.
2565 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
2566 ** and 0 if you aren't
2567 **
2568 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
2569 ** blocks for a given transaction on disk
2570 **
2571 */
2572 static int can_dirty(struct reiserfs_journal_cnode *cn) {
2573 kdev_t dev = cn->dev ;
2574 unsigned long blocknr = cn->blocknr ;
2575 struct reiserfs_journal_cnode *cur = cn->hprev ;
2576 int can_dirty = 1 ;
2577
2578 /* first test hprev. These are all newer than cn, so any node here
2579 ** with the same block number and dev means this node can't be sent
2580 ** to disk right now.
2581 */
2582 while(cur && can_dirty) {
2583 if (cur->jlist && cur->bh && cur->blocknr && cur->dev == dev &&
2584 cur->blocknr == blocknr) {
2585 can_dirty = 0 ;
2586 }
2587 cur = cur->hprev ;
2588 }
2589 /* then test hnext. These are all older than cn. As long as they
2590 ** are committed to the log, it is safe to write cn to disk
2591 */
2592 cur = cn->hnext ;
2593 while(cur && can_dirty) {
2594 if (cur->jlist && cur->jlist->j_len > 0 &&
2595 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
2596 cur->blocknr && cur->dev == dev && cur->blocknr == blocknr) {
2597 can_dirty = 0 ;
2598 }
2599 cur = cur->hnext ;
2600 }
2601 return can_dirty ;
2602 }
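/* example: if block N was logged in transaction 12 (fully committed) and again
** in transaction 14 (commit still in flight), the trans-12 cnode for N finds
** the newer trans-14 cnode on its hprev chain and can_dirty returns 0, since
** writing N's real location now would push trans-14 data to disk before its
** commit block is safely in the log.
*/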
2603
2604 /* syncs the commit blocks, but does not force the real buffers to disk
2605 ** will wait until the current transaction is done/committed before returning
2606 */
2607 int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2608
2609 if (SB_JOURNAL(p_s_sb)->j_len == 0) {
2610 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2611 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2612 }
2613 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
2614 }
2615
2616 int show_reiserfs_locks(void) {
2617
2618 dump_journal_writers() ;
2619 return 0 ;
2620 }
2621
2622 /*
2623 ** used to get memory back from async commits that are floating around
2624 ** and to reclaim any blocks deleted but unusable because their commits
2625 ** haven't hit disk yet. called from bitmap.c
2626 **
2627 ** if it starts flushing things, it ors SCHEDULE_OCCURRED into repeat.
2628 ** note, this is just if schedule has a chance of occurring. I need to
2629 ** change flush_commit_lists to have a repeat parameter too.
2630 **
2631 */
2632 void flush_async_commits(struct super_block *p_s_sb) {
2633 int i ;
2634
2635 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
2636 if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
2637 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
2638 }
2639 }
2640 }
2641
2642 /*
2643 ** flushes any old transactions to disk
2644 ** ends the current transaction if it is too old
2645 **
2646 ** also calls flush_journal_list with old_only == 1, which allows me to reclaim
2647 ** memory and such from the journal lists whose real blocks are all on disk.
2648 **
2649 ** called by sync_dev_journal from buffer.c
2650 */
2651 int flush_old_commits(struct super_block *p_s_sb, int immediate) {
2652 int i ;
2653 int count = 0;
2654 int start ;
2655 time_t now ;
2656 struct reiserfs_transaction_handle th ;
2657
2658 start = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
2659 now = CURRENT_TIME ;
2660
2661 /* safety check so we don't flush while we are replaying the log during mount */
2662 if (SB_JOURNAL_LIST_INDEX(p_s_sb) < 0) {
2663 return 0 ;
2664 }
2665 /* starting with oldest, loop until we get to the start */
2666 i = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
2667 while(i != start) {
2668 if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) ||
2669 immediate)) {
2670 /* we have to check again to be sure the current transaction did not change */
2671 if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
2672 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
2673 }
2674 }
2675 i = (i + 1) % JOURNAL_LIST_COUNT ;
2676 count++ ;
2677 }
2678 /* now, check the current transaction. If there are no writers, and it is too old, finish it, and
2679 ** force the commit blocks to disk
2680 */
2681 if (!immediate && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&
2682 SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
2683 SB_JOURNAL(p_s_sb)->j_len > 0 &&
2684 (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
2685 journal_join(&th, p_s_sb, 1) ;
2686 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2687 journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2688 do_journal_end(&th, p_s_sb,1, COMMIT_NOW) ;
2689 } else if (immediate) { /* belongs above, but I wanted this to be very explicit as a special case. If they say to
2690 flush, we must be sure old transactions hit the disk too. */
2691 journal_join(&th, p_s_sb, 1) ;
2692 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2693 journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2694 do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
2695 }
2696 reiserfs_journal_kupdate(p_s_sb) ;
2697 return 0 ;
2698 }
2699
2700 /*
2701 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
2702 **
2703 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
2704 ** the writers are done. By the time it wakes up, the transaction it was called for has already ended, so it just
2705 ** flushes the commit list and returns 0.
2706 **
2707 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
2708 **
2709 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
2710 */
2711 static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
2712 unsigned long nblocks, int flags) {
2713
2714 time_t now ;
2715 int flush = flags & FLUSH_ALL ;
2716 int commit_now = flags & COMMIT_NOW ;
2717 int wait_on_commit = flags & WAIT ;
2718
2719 if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
2720 reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
2721 th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
2722 }
2723
2724 SB_JOURNAL(p_s_sb)->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
2725 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
2726 atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2727 }
2728
2729 /* BUG: deal with the case where j_len is 0 but blocks freed earlier in the transaction still need to be released.
2730 ** This will be dealt with by the next transaction that actually writes something, but it should be taken
2731 ** care of in this trans
2732 */
2733 if (SB_JOURNAL(p_s_sb)->j_len == 0) {
2734 int wcount = atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2735 unlock_journal(p_s_sb) ;
2736 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) > 0 && wcount <= 0) {
2737 atomic_dec(&(SB_JOURNAL(p_s_sb)->j_jlock)) ;
2738 wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2739 }
2740 return 0 ;
2741 }
2742 /* if wcount > 0, and we are called with flush or commit_now,
2743 ** we wait on j_join_wait. We will wake up when the last writer has
2744 ** finished the transaction, and started it on its way to the disk.
2745 ** Then, we flush the commit or journal list, and just return 0
2746 ** because the rest of journal end was already done for this transaction.
2747 */
2748 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) {
2749 if (flush || commit_now) {
2750 int orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
2751 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
2752 if (flush) {
2753 SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
2754 }
2755 unlock_journal(p_s_sb) ;
2756 /* sleep while the current transaction is still j_jlocked */
2757 while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
2758 SB_JOURNAL(p_s_sb)->j_trans_id == th->t_trans_id) {
2759 sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2760 }
2761 if (commit_now) {
2762 if (wait_on_commit) {
2763 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
2764 } else {
2765 commit_flush_async(p_s_sb, orig_jindex) ;
2766 }
2767 }
2768 return 0 ;
2769 }
2770 unlock_journal(p_s_sb) ;
2771 return 0 ;
2772 }
2773
2774 /* deal with old transactions where we are the last writers */
2775 now = CURRENT_TIME ;
2776 if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
2777 commit_now = 1 ;
2778 SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
2779 }
2780 /* don't batch when someone is waiting on j_join_wait */
2781 /* don't batch when syncing the commit or flushing the whole trans */
2782 if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now &&
2783 (SB_JOURNAL(p_s_sb)->j_len < SB_JOURNAL_MAX_BATCH(p_s_sb)) &&
2784 SB_JOURNAL(p_s_sb)->j_len_alloc < SB_JOURNAL_MAX_BATCH(p_s_sb) && SB_JOURNAL(p_s_sb)->j_cnode_free > (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3)) {
2785 SB_JOURNAL(p_s_sb)->j_bcount++ ;
2786 unlock_journal(p_s_sb) ;
2787 return 0 ;
2788 }
2789
2790 if (SB_JOURNAL(p_s_sb)->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
2791 reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
2792 }
2793 return 1 ;
2794 }
2795
2796 /*
2797 ** Does all the work that makes deleting blocks safe.
2798 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
2799 **
2800 ** otherwise:
2801 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
2802 ** before this transaction has finished.
2803 **
2804 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
2805 ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
2806 ** the block can't be reallocated yet.
2807 **
2808 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
2809 */
2810 int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long blocknr) {
2811 struct reiserfs_journal_cnode *cn = NULL ;
2812 struct buffer_head *bh = NULL ;
2813 struct reiserfs_list_bitmap *jb = NULL ;
2814 int cleaned = 0 ;
2815
2816 if (reiserfs_dont_log(th->t_super)) {
2817 bh = sb_get_hash_table(p_s_sb, blocknr) ;
2818 if (bh && buffer_dirty (bh)) {
2819 reiserfs_warning (p_s_sb, "journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
2820 BUG ();
2821 }
2822 brelse (bh);
2823 return 0 ;
2824 }
2825 bh = sb_get_hash_table(p_s_sb, blocknr) ;
2826 /* if it is journal new, we just remove it from this transaction */
2827 if (bh && buffer_journal_new(bh)) {
2828 mark_buffer_notjournal_new(bh) ;
2829 clear_prepared_bits(bh) ;
2830 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
2831 } else {
2832 /* set the bit for this block in the journal bitmap for this transaction */
2833 jb = SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap ;
2834 if (!jb) {
2835 reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
2836 }
2837 set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;
2838
2839 /* Note, the entire while loop is not allowed to schedule. */
2840
2841 if (bh) {
2842 clear_prepared_bits(bh) ;
2843 }
2844 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
2845
2846 /* find all older transactions with this block, make sure they don't try to write it out */
2847 cn = get_journal_hash_dev(SB_JOURNAL(p_s_sb)->j_list_hash_table, p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
2848 while (cn) {
2849 if (p_s_sb->s_dev == cn->dev && blocknr == cn->blocknr) {
2850 set_bit(BLOCK_FREED, &cn->state) ;
2851 if (cn->bh) {
2852 if (!cleaned) {
2853 /* remove_from_transaction will brelse the buffer if it was
2854 ** in the current trans
2855 */
2856 mark_buffer_notjournal_dirty(cn->bh) ;
2857 cleaned = 1 ;
2858 put_bh(cn->bh) ;
2859 if (atomic_read(&(cn->bh->b_count)) < 0) {
2860 reiserfs_warning(p_s_sb, "journal-2138: cn->bh->b_count < 0\n") ;
2861 }
2862 }
2863 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
2864 atomic_dec(&(cn->jlist->j_nonzerolen)) ;
2865 }
2866 cn->bh = NULL ;
2867 }
2868 }
2869 cn = cn->hnext ;
2870 }
2871 }
2872
2873 if (bh) {
2874 reiserfs_clean_and_file_buffer(bh) ;
2875 put_bh(bh) ; /* get_hash grabs the buffer */
2876 if (atomic_read(&(bh->b_count)) < 0) {
2877 reiserfs_warning(p_s_sb, "journal-2165: bh->b_count < 0\n") ;
2878 }
2879 }
2880 return 0 ;
2881 }
2882
2883 void reiserfs_update_inode_transaction(struct inode *inode) {
2884
2885 inode->u.reiserfs_i.i_trans_index = SB_JOURNAL_LIST_INDEX(inode->i_sb);
2886
2887 inode->u.reiserfs_i.i_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
2888 }
2889
2890 void reiserfs_update_tail_transaction(struct inode *inode) {
2891
2892 inode->u.reiserfs_i.i_tail_trans_index = SB_JOURNAL_LIST_INDEX(inode->i_sb);
2893
2894 inode->u.reiserfs_i.i_tail_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
2895 }
2896
2897 static void __commit_trans_index(struct inode *inode, unsigned long id,
2898 unsigned long index)
2899 {
2900 struct reiserfs_journal_list *jl ;
2901 struct reiserfs_transaction_handle th ;
2902 struct super_block *sb = inode->i_sb ;
2903
2904 jl = SB_JOURNAL_LIST(sb) + index;
2905
2906 /* is it from the current transaction, or from an unknown transaction? */
2907 if (id == SB_JOURNAL(sb)->j_trans_id) {
2908 journal_join(&th, sb, 1) ;
2909 journal_end_sync(&th, sb, 1) ;
2910 } else if (jl->j_trans_id == id) {
2911 flush_commit_list(sb, jl, 1) ;
2912 }
2913 /* if the transaction id does not match, this list is long since flushed
2914 ** and we don't have to do anything here
2915 */
2916 }
reiserfs_commit_for_tail(struct inode * inode)2917 void reiserfs_commit_for_tail(struct inode *inode) {
2918 unsigned long id = inode->u.reiserfs_i.i_tail_trans_id;
2919 unsigned long index = inode->u.reiserfs_i.i_tail_trans_index;
2920
2921 /* for tails, if this info is unset there's nothing to commit */
2922 if (id && index)
2923 __commit_trans_index(inode, id, index);
2924 }
reiserfs_commit_for_inode(struct inode * inode)2925 void reiserfs_commit_for_inode(struct inode *inode) {
2926 unsigned long id = inode->u.reiserfs_i.i_trans_id;
2927 unsigned long index = inode->u.reiserfs_i.i_trans_index;
2928
2929 /* for the whole inode, assume unset id or index means it was
2930 * changed in the current transaction. More conservative
2931 */
2932 if (!id || !index)
2933 reiserfs_update_inode_transaction(inode) ;
2934
2935 __commit_trans_index(inode, id, index);
2936 }
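
/* Illustrative sketch (not part of this file): the intended pairing is to
** record, at modification time, which transaction an inode was dirtied in,
** and later force just that transaction to disk.  sync_one_inode is a
** hypothetical caller invented for the example.
*/
#if 0
static void sync_one_inode(struct inode *inode)
{
  /* at modification time, while inside a transaction: */
  reiserfs_update_inode_transaction(inode) ;

  /* at fsync time: commits the recorded transaction, or does nothing
  ** if that transaction has already been flushed
  */
  reiserfs_commit_for_inode(inode) ;
}
#endif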

void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
                                      struct buffer_head *bh) {
  PROC_INFO_INC( p_s_sb, journal.restore_prepared );
  if (reiserfs_dont_log (p_s_sb))
    return;

  if (!bh) {
    return ;
  }
  clear_bit(BH_JPrepared, &bh->b_state) ;
}

extern struct tree_balance *cur_tb ;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** -- clean it
** -- wait on it.
*/
void reiserfs_prepare_for_journal(struct super_block *p_s_sb,
                                  struct buffer_head *bh, int wait) {
  int retry_count = 0 ;

  PROC_INFO_INC( p_s_sb, journal.prepare );
  if (reiserfs_dont_log (p_s_sb))
    return;

  while(!test_bit(BH_JPrepared, &bh->b_state) ||
        (wait && buffer_locked(bh))) {
    if (buffer_journaled(bh)) {
      set_bit(BH_JPrepared, &bh->b_state) ;
      return ;
    }
    set_bit(BH_JPrepared, &bh->b_state) ;
    if (wait) {
      RFALSE( buffer_locked(bh) && cur_tb != NULL,
              "waiting while do_balance was running\n") ;
      wait_on_buffer(bh) ;
    }
    PROC_INFO_INC( p_s_sb, journal.prepare_retry );
    retry_count++ ;
  }
}
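
/* Illustrative sketch (not part of this file): the usual lifecycle around a
** metadata change.  modify_block is a hypothetical caller; the real callers
** are the balancing and bitmap paths.  If the change is abandoned instead,
** reiserfs_restore_prepared_buffer(sb, bh) clears the prepared bit.
*/
#if 0
static void modify_block(struct reiserfs_transaction_handle *th,
                         struct super_block *sb, struct buffer_head *bh)
{
  reiserfs_prepare_for_journal(sb, bh, 1) ; /* clean it and wait on it */
  /* ...alter bh->b_data here... */
  journal_mark_dirty(th, sb, bh) ;          /* log it in this transaction */
}
#endif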

/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
*/
static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
                          int flags) {
  struct reiserfs_journal_cnode *cn, *next, *jl_cn;
  struct reiserfs_journal_cnode *last_cn = NULL;
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ; /* commit bh */
  struct buffer_head *d_bh ; /* desc bh */
  int cur_write_start = 0 ; /* start index of current log write */
  int cur_blocks_left = 0 ; /* number of journal blocks left to write */
  int old_start ;
  int i ;
  int jindex ;
  int orig_jindex ;
  int flush = flags & FLUSH_ALL ;
  int commit_now = flags & COMMIT_NOW ;
  int wait_on_commit = flags & WAIT ;
  struct reiserfs_super_block *rs ;

  if (reiserfs_dont_log(th->t_super)) {
    return 0 ;
  }

  lock_journal(p_s_sb) ;
  if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
    flags |= FLUSH_ALL ;
    flush = 1 ;
  }
  if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
    flags |= COMMIT_NOW ;
    commit_now = 1 ;
  }

  /* check_journal_end locks the journal and unlocks it if it does not return 1.
  ** it tells us whether we should continue with the journal_end, or just return
  */
  if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
    return 0 ;
  }

  /* check_journal_end might set these, check again */
  if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
    flush = 1 ;
  }
  if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
    commit_now = 1 ;
  }
  /*
  ** j_must_wait means we have to flush the log blocks, and the real blocks for
  ** this transaction
  */
  if (SB_JOURNAL(p_s_sb)->j_must_wait > 0) {
    flush = 1 ;
  }

#ifdef REISERFS_PREALLOCATE
  reiserfs_discard_all_prealloc(th); /* this must not add new blocks to
                                      * the transaction */
#endif

  rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
  /* setup description block */
  d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
  mark_buffer_uptodate(d_bh, 1) ;
  desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
  memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
  memcpy(desc->j_magic, JOURNAL_DESC_MAGIC, 8) ;
  desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;

  /* setup commit block.  Don't write this one (and keep it clean) until
  ** after everyone else is written
  */
  c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                        ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
  commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
  mark_buffer_uptodate(c_bh, 1) ;

  /* init this journal list */
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_timestamp = SB_JOURNAL(p_s_sb)->j_trans_start_time ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_bh = c_bh ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_start = SB_JOURNAL(p_s_sb)->j_start ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len = SB_JOURNAL(p_s_sb)->j_len ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_nonzerolen), SB_JOURNAL(p_s_sb)->j_len) ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_left), SB_JOURNAL(p_s_sb)->j_len + 2);
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = NULL ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;

  /* which is faster, locking/unlocking at the start and end of the for,
  ** or locking once per iteration around the insert_journal_hash?
  ** either way, we are write locking insert_journal_hash.  The ENTIRE FOR
  ** LOOP must not cause schedule to occur.
  */

  /* for each real block, add it to the journal list hash,
  ** copy into real block index array in the commit or desc block
  */
  for (i = 0, cn = SB_JOURNAL(p_s_sb)->j_first ; cn ; cn = cn->next, i++) {
    if (test_bit(BH_JDirty, &cn->bh->b_state) ) {
      jl_cn = get_cnode(p_s_sb) ;
      if (!jl_cn) {
        reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
      }
      if (i == 0) {
        SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = jl_cn ;
      }
      jl_cn->prev = last_cn ;
      jl_cn->next = NULL ;
      if (last_cn) {
        last_cn->next = jl_cn ;
      }
      last_cn = jl_cn ;
      /* make sure the block we are trying to log is not a block
         of journal or reserved area */
      if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
        reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
      }
      jl_cn->blocknr = cn->bh->b_blocknr ;
      jl_cn->state = 0 ;
      jl_cn->dev = cn->bh->b_dev ;
      jl_cn->bh = cn->bh ;
      jl_cn->jlist = SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb) ;
      insert_journal_hash(SB_JOURNAL(p_s_sb)->j_list_hash_table, jl_cn) ;
      if (i < JOURNAL_TRANS_HALF) {
        desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
      } else {
        commit->j_realblock[i - JOURNAL_TRANS_HALF] = cpu_to_le32(cn->bh->b_blocknr) ;
      }
    } else {
      i-- ;
    }
  }
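
  /* Worked example (illustrative numbers): with JOURNAL_TRANS_HALF == 1018,
  ** a transaction of j_len == 1022 real blocks fills desc->j_realblock[0..1017]
  ** and spills the remaining four entries into commit->j_realblock[0..3].
  ** The split keeps both the desc and the commit structs within one 4k block.
  */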

  desc->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len) ;
  desc->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
  desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
  commit->j_len = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_len) ;

  /* special check in case all buffers in the journal were marked for not logging */
  if (SB_JOURNAL(p_s_sb)->j_len == 0) {
    brelse(d_bh) ;
    brelse(c_bh) ;
    unlock_journal(p_s_sb) ;
    reiserfs_warning(p_s_sb, "journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
    atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
    wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
    return 0 ;
  }

  /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
  cur_write_start = SB_JOURNAL(p_s_sb)->j_start ;
  cur_blocks_left = SB_JOURNAL(p_s_sb)->j_len ;
  cn = SB_JOURNAL(p_s_sb)->j_first ;
  jindex = 1 ; /* start at one so we don't get the desc again */
  while(cur_blocks_left > 0) {
    /* copy all the real blocks into log area.  dirty log blocks */
    if (test_bit(BH_JDirty, &cn->bh->b_state)) {
      struct buffer_head *tmp_bh ;
      tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                              ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
      mark_buffer_uptodate(tmp_bh, 1) ;
      memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
      jindex++ ;
    } else {
      /* JDirty cleared sometime during transaction.  don't log this one */
      reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!\n") ;
    }
    cn = cn->next ;
    cur_blocks_left-- ;
  }
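
  /* Worked example (illustrative numbers): with an on-disk journal of 8192
  ** blocks, j_start == 8190 and j_len == 4, the desc block lands at log
  ** offset 8190 and the copies land at (8190 + 1) % 8192 == 8191,
  ** (8190 + 2) % 8192 == 0, and so on -- the modulo is what wraps the
  ** transaction around the end of the log area.
  */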

  /* we are done with both the c_bh and d_bh, but
  ** c_bh must be written after all other commit blocks,
  ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
  */

  /* now loop through and mark all buffers from this transaction as JDirty_wait.
  ** clear the JDirty bit, clear BH_JNew too.
  ** if they weren't JDirty, they weren't logged, just relse them and move on
  */
  cn = SB_JOURNAL(p_s_sb)->j_first ;
  while(cn) {
    clear_bit(BH_JNew, &(cn->bh->b_state)) ;
    if (test_bit(BH_JDirty, &(cn->bh->b_state))) {
      set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ;
      clear_bit(BH_JDirty, &(cn->bh->b_state)) ;
    } else {
      brelse(cn->bh) ;
    }
    next = cn->next ;
    free_cnode(p_s_sb, cn) ;
    cn = next ;
  }

  /* unlock the journal list for committing and flushing */
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 0) ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 0) ;

  orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
  jindex = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
  SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;

  /* write any buffers that must hit disk before this commit is done */
  fsync_buffers_list(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;

  /* honor the flush and async wishes from the caller */
  if (flush) {
    flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
    flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
  } else if (commit_now) {
    if (wait_on_commit) {
      flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
    } else {
      commit_flush_async(p_s_sb, orig_jindex) ;
    }
  }

  /* reset journal values for the next transaction */
  old_start = SB_JOURNAL(p_s_sb)->j_start ;
  SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
  atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
  SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
  SB_JOURNAL(p_s_sb)->j_last = NULL ;
  SB_JOURNAL(p_s_sb)->j_first = NULL ;
  SB_JOURNAL(p_s_sb)->j_len = 0 ;
  SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
  SB_JOURNAL(p_s_sb)->j_trans_id++ ;
  SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
  SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
  SB_JOURNAL(p_s_sb)->j_next_full_flush = 0 ;
  SB_JOURNAL(p_s_sb)->j_next_async_flush = 0 ;
  init_journal_hash(p_s_sb) ;

  /* if the next transaction has any chance of wrapping, flush
  ** transactions that might get overwritten.  If any journal lists are very
  ** old, flush them as well.
  */
  for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
    jindex = i ;
    if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && SB_JOURNAL(p_s_sb)->j_start <= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
      if ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
        flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
      }
    } else if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
               (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
      if (((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
          SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
        flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
      }
    }
    /* this check should always be run, to send old lists to disk */
    if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
        SB_JOURNAL_LIST(p_s_sb)[jindex].j_timestamp <
        (CURRENT_TIME - (SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) * 4))) {
      flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
    }
  }
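
  /* Worked example (illustrative numbers): with an 8192 block log,
  ** SB_JOURNAL_TRANS_MAX == 1024 and j_start == 7800, the next transaction
  ** could reach (7800 + 1024 + 1) % 8192 == 633, so any journal list whose
  ** j_start lies in 7800..8191 or 0..633 still owns log blocks that may be
  ** overwritten, and must be flushed first.
  */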

  /* if the next journal_list is still in use, flush it */
  if (SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len != 0) {
    flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb), 1) ;
  }

  /* we don't want anyone flushing the new transaction's list */
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
  atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
  SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb) +
                                                                                         SB_JOURNAL_LIST_INDEX(p_s_sb)) ;

  if (!(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap)) {
    reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
  }
  unlock_journal(p_s_sb) ;
  atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
  /* wake up anybody waiting to join. */
  wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
  return 0 ;
}
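
/* Illustrative sketch (not part of this file): how the flag combinations map
** to behavior.  A sync caller would pass COMMIT_NOW | WAIT, a full flush
** passes FLUSH_ALL, and a plain batchable journal_end passes no flags.  The
** function name below is made up; the real wrappers live elsewhere in this
** file.
*/
#if 0
static int journal_end_sync_example(struct reiserfs_transaction_handle *th,
                                    struct super_block *sb, unsigned long nblocks)
{
  /* end the transaction, write the commit block, and wait for it */
  return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT) ;
}
#endif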