/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/* Inode number of the file that this block
				   belongs to */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};

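/**
 * nilfs_warn_segment_error - report a segment check failure
 * @err: segment check result (NILFS_SEG_* value)
 *
 * Prints a warning message that describes the failure and converts the
 * check result into a negative error code: -EIO for I/O failures and
 * -EINVAL for all other cases.
 */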
static int nilfs_warn_segment_error(int err)
{
	switch (err) {
	case NILFS_SEG_FAIL_IO:
		printk(KERN_WARNING
		       "NILFS warning: I/O error on loading last segment\n");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		printk(KERN_WARNING
		       "NILFS warning: Segment magic number invalid\n");
		break;
	case NILFS_SEG_FAIL_SEQ:
		printk(KERN_WARNING
		       "NILFS warning: Sequence number mismatch\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in super root\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in segment payload\n");
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		printk(KERN_WARNING
		       "NILFS warning: Inconsistent segment\n");
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: No super root in the last segment\n");
		break;
	}
	return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum over a range of consecutive blocks
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: disk block number (DBN) of start block
 * @nblock: number of blocks to be checked
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh;

			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (nilfs_compute_checksum(
			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
			    sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
		      struct nilfs_segment_summary **sum)
{
	struct buffer_head *bh_sum;

	bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (bh_sum)
		*sum = (struct nilfs_segment_summary *)bh_sum->b_data;
	return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
			      struct buffer_head *bh_sum,
			      struct nilfs_segment_summary *sum)
{
	unsigned long nblock;
	u32 crc;
	int ret;

	ret = NILFS_SEG_FAIL_MAGIC;
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
		goto out;

	ret = NILFS_SEG_FAIL_SEQ;
	if (le64_to_cpu(sum->ss_seq) != seg_seq)
		goto out;

	nblock = le32_to_cpu(sum->ss_nblocks);
	ret = NILFS_SEG_FAIL_CONSISTENCY;
	if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
		/* This limits the number of blocks read in the CRC check */
		goto out;

	ret = NILFS_SEG_FAIL_IO;
	if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
				   ((u64)nblock << nilfs->ns_blocksize_bits),
				   bh_sum->b_blocknr, nblock))
		goto out;

	ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
	if (crc != le32_to_cpu(sum->ss_datasum))
		goto out;
	ret = 0;
out:
	return ret;
}

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
				     struct buffer_head **pbh,
				     unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
			       nilfs->ns_blocksize);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
				    struct buffer_head **pbh,
				    unsigned int *offset, unsigned int bytes,
				    unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
			       nilfs->ns_blocksize);
	}
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
				struct nilfs_segment_summary *sum,
				struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	u32 nfinfo, sumbytes;
	sector_t blocknr;
	ino_t ino;
	int err = -EIO;

	nfinfo = le32_to_cpu(sum->ss_nfinfo);
	if (!nfinfo)
		return 0;

	sumbytes = le32_to_cpu(sum->ss_sumbytes);
	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(sum->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
						sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
							sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for data sync logs */
		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
					nnodeblk);
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh);   /* brelse(NULL) is just ignored */
	return err;
}

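/**
 * dispose_recovery_list - release a list of nilfs_recovery_block structs
 * @head: list head of nilfs_recovery_block structs built by
 *	nilfs_scan_dsync_log()
 */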
static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb
			= list_entry(head->next,
				     struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}

struct nilfs_segment_entry {
	struct list_head	list;
	__u64			segnum;
};

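/**
 * nilfs_segment_list_add - add a segment number to a segment list
 * @head: list to which a new nilfs_segment_entry is appended
 * @segnum: segment number to be recorded
 *
 * Returns 0 on success, or %-ENOMEM if the list entry cannot be allocated.
 */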
static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}

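/**
 * nilfs_dispose_segment_list - release a list of nilfs_segment_entry structs
 * @head: list head of nilfs_segment_entry structs to be freed
 */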
void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent
			= list_entry(head->next,
				     struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}

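/**
 * nilfs_prepare_segment_for_recovery - prepare a segment to write the
 * recovered blocks into
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info
 *
 * Scraps the segments written after the latest super root, allocates a
 * fresh segment for the recovery output, and updates the write pointers
 * (segment number, sequence number, and partial segment offset) of @nilfs.
 */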
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct super_block *sb,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	return err;
}

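/**
 * nilfs_recovery_copy_block - copy a salvaged block into a page cache page
 * @nilfs: nilfs object
 * @rb: nilfs_recovery_block describing the block to be copied
 * @page: destination page
 *
 * Reads the block at @rb->blocknr from the device and copies its contents
 * into the corresponding position of @page.  Returns 0 on success or
 * %-EIO if the block cannot be read.
 */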
static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh_org);
	return 0;
}

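/**
 * nilfs_recover_dsync_blocks - restore data blocks collected from dsync logs
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root object of the latest checkpoint
 * @head: list of nilfs_recovery_block structs to be restored
 * @nr_salvaged_blocks: place to count the number of salvaged blocks
 *
 * Writes each listed block back into the page cache of its inode and marks
 * the file dirty so that a following segment construction flushes it.  An
 * error on one block is reported and does not stop the remaining blocks
 * from being processed; the first error encountered is returned.
 */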
static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned blocksize = nilfs->ns_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					0, &page, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;
			if (pos + blocksize > isize)
				vmtruncate(inode, isize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		page_cache_release(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		page_cache_release(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root object of the latest checkpoint
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct super_block *sb,
				 struct nilfs_root *root,
				 struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum;
	sector_t pseg_start;
	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST,   /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
		brelse(bh_sum);
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum) {
			err = -EIO;
			goto failed;
		}

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}

		flags = le16_to_cpu(sum->ss_flags);
		if (flags & NILFS_SS_SR)
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		empty_seg = 0;
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		if (!(flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = nilfs->ns_ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!(flags & NILFS_SS_LOGBGN) ||
			    !(flags & NILFS_SS_SYNDT))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!(flags & NILFS_SS_SYNDT))
				goto confused;

			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
						   &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (flags & NILFS_SS_LOGEND) {
				err = nilfs_recover_dsync_blocks(
					nilfs, sb, root, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += le32_to_cpu(sum->ss_nblocks);
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sb->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	brelse(bh_sum);
	dispose_recovery_list(&dsync_blocks);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sb->s_id, err, (unsigned long long)pseg_start);
	goto out;
}

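/**
 * nilfs_finish_roll_forward - clean up after the roll-forward recovery
 * @nilfs: nilfs object
 * @ri: pointer to a nilfs_recovery_info
 *
 * If the salvaged logs begin in the same full segment as the latest super
 * root, the block at the start of those logs is zero-filled and written
 * out synchronously so that the stale log header is not scanned again.
 */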
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
	BUG_ON(!bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		printk(KERN_WARNING
		       "NILFS warning: buffer sync write failed during "
		       "post-cleaning of recovery.\n");
	brelse(bh);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
			      struct super_block *sb,
			      struct nilfs_recovery_info *ri)
{
	struct nilfs_root *root;
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_log_writer(sb, root);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sb);
		nilfs_detach_log_writer(sb);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, ri);
	}

 failed:
	nilfs_put_root(root);
	return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super root, starting from
 * the partial segment pointed to by the super block.  It sets up struct
 * the_nilfs through this search, and fills the nilfs_recovery_info (@ri)
 * needed for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
			    struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end; /* range of full segment (block number) */
	sector_t b, end;
	unsigned long nblocks;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

	for (;;) {
		brelse(bh_sum);
		ret = NILFS_SEG_FAIL_IO;
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum)
			goto failed;

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}

		nblocks = le32_to_cpu(sum->ss_nblocks);
		pseg_end = pseg_start + nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		flags = le16_to_cpu(sum->ss_flags);
		if (!(flags & NILFS_SS_SR) && !scan_newer) {
			/* This will never happen because a superblock
			   (last_segment) always points to a pseg
			   having a super root. */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				__breadahead(nilfs->ns_bdev, b++,
					     nilfs->ns_blocksize);
		}
		if (!(flags & NILFS_SS_SR)) {
			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (flags & NILFS_SS_LOGEND)
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		sr_pseg_start = pseg_start;
		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	brelse(bh_sum);
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	brelse(bh_sum);
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}