1 /*
2  *  linux/fs/ext2/balloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  Enhanced block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
10  *  Big-endian to little-endian byte-swapping/bitmaps by
11  *        David S. Miller (davem@caip.rutgers.edu), 1995
12  */
13 
14 #include <linux/config.h>
15 #include <linux/fs.h>
16 #include <linux/ext2_fs.h>
17 #include <linux/locks.h>
18 #include <linux/quotaops.h>
19 
20 /*
21  * balloc.c contains the blocks allocation and deallocation routines
22  */
23 
24 /*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
28  *
29  * The file system contains group descriptors which are located after the
30  * super block.  Each descriptor contains the number of the bitmap block and
31  * the free blocks count in the block.  The descriptors are loaded in memory
32  * when a file system is mounted (see ext2_read_super).
33  */
34 
35 
/* True iff block b lies in [first, first + len - 1].  NOTE: evaluates its
 * arguments more than once -- do not pass expressions with side effects. */
#define in_range(b, first, len)		((b) >= (first) && (b) <= (first) + (len) - 1)
37 
/*
 * ext2_get_group_desc - look up the descriptor of a block group
 * @sb:		superblock of the filesystem
 * @block_group: group number whose descriptor is wanted
 * @bh:		if non-NULL, set to the buffer_head holding the descriptor
 *
 * Returns a pointer into the cached group-descriptor-table buffer for
 * @block_group, or NULL (after reporting via ext2_error) if the group
 * number is out of range or its descriptor block was never loaded.
 */
struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long desc;
	struct ext2_group_desc * gdp;

	if (block_group >= sb->u.ext2_sb.s_groups_count) {
		ext2_error (sb, "ext2_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sb->u.ext2_sb.s_groups_count);

		return NULL;
	}

	/* Which descriptor block the group lives in, and its slot therein. */
	group_desc = block_group / EXT2_DESC_PER_BLOCK(sb);
	desc = block_group % EXT2_DESC_PER_BLOCK(sb);
	if (!sb->u.ext2_sb.s_group_desc[group_desc]) {
		ext2_error (sb, "ext2_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
			     block_group, group_desc, desc);
		return NULL;
	}

	gdp = (struct ext2_group_desc *)
	      sb->u.ext2_sb.s_group_desc[group_desc]->b_data;
	if (bh)
		*bh = sb->u.ext2_sb.s_group_desc[group_desc];
	return gdp + desc;
}
71 
72 /*
73  * Read the bitmap for a given block_group, reading into the specified
74  * slot in the superblock's bitmap cache.
75  *
76  * Return >=0 on success or a -ve error code.
77  */
78 
static int read_block_bitmap (struct super_block * sb,
			       unsigned int block_group,
			       unsigned long bitmap_nr)
{
	struct ext2_group_desc * gdp;
	struct buffer_head * bh = NULL;
	int retval = -EIO;

	gdp = ext2_get_group_desc (sb, block_group, NULL);
	if (!gdp)
		goto error_out;
	retval = 0;
	/* The descriptor records where this group's block bitmap lives. */
	bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
	if (!bh) {
		ext2_error (sb, "read_block_bitmap",
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %lu",
			    block_group, (unsigned long) gdp->bg_block_bitmap);
		retval = -EIO;
	}
	/*
	 * On IO error, just leave a zero in the superblock's block pointer for
	 * this group.  The IO will be retried next time.
	 */
error_out:
	/* Fill the cache slot even on failure (bh stays NULL then). */
	sb->u.ext2_sb.s_block_bitmap_number[bitmap_nr] = block_group;
	sb->u.ext2_sb.s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
108 
109 /*
110  * load_block_bitmap loads the block bitmap for a blocks group
111  *
112  * It maintains a cache for the last bitmaps loaded.  This cache is managed
113  * with a LRU algorithm.
114  *
115  * Notes:
116  * 1/ There is one cache per mounted file system.
117  * 2/ If the file system contains less than EXT2_MAX_GROUP_LOADED groups,
118  *    this function reads the bitmap without maintaining a LRU cache.
119  *
120  * Return the slot used to store the bitmap, or a -ve error code.
121  */
static int __load_block_bitmap (struct super_block * sb,
			        unsigned int block_group)
{
	int i, j, retval = 0;
	unsigned long block_bitmap_number;
	struct buffer_head * block_bitmap;

	if (block_group >= sb->u.ext2_sb.s_groups_count)
		ext2_panic (sb, "load_block_bitmap",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sb->u.ext2_sb.s_groups_count);

	/*
	 * Small filesystem: every group gets its own cache slot, indexed
	 * directly by group number -- no LRU management needed.
	 */
	if (sb->u.ext2_sb.s_groups_count <= EXT2_MAX_GROUP_LOADED) {
		if (sb->u.ext2_sb.s_block_bitmap[block_group]) {
			if (sb->u.ext2_sb.s_block_bitmap_number[block_group] ==
			    block_group)
				return block_group;
			ext2_error (sb, "__load_block_bitmap",
				    "block_group != block_bitmap_number");
		}
		retval = read_block_bitmap (sb, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}

	/* Linear scan of the LRU cache for an entry for this group. */
	for (i = 0; i < sb->u.ext2_sb.s_loaded_block_bitmaps &&
		    sb->u.ext2_sb.s_block_bitmap_number[i] != block_group; i++)
		;
	if (i < sb->u.ext2_sb.s_loaded_block_bitmaps &&
  	    sb->u.ext2_sb.s_block_bitmap_number[i] == block_group) {
		/* Cache hit: move the entry to the front (slot 0). */
		block_bitmap_number = sb->u.ext2_sb.s_block_bitmap_number[i];
		block_bitmap = sb->u.ext2_sb.s_block_bitmap[i];
		for (j = i; j > 0; j--) {
			sb->u.ext2_sb.s_block_bitmap_number[j] =
				sb->u.ext2_sb.s_block_bitmap_number[j - 1];
			sb->u.ext2_sb.s_block_bitmap[j] =
				sb->u.ext2_sb.s_block_bitmap[j - 1];
		}
		sb->u.ext2_sb.s_block_bitmap_number[0] = block_bitmap_number;
		sb->u.ext2_sb.s_block_bitmap[0] = block_bitmap;

		/*
		 * There's still one special case here --- if block_bitmap == 0
		 * then our last attempt to read the bitmap failed and we have
		 * just ended up caching that failure.  Try again to read it.
		 */
		if (!block_bitmap)
			retval = read_block_bitmap (sb, block_group, 0);
	} else {
		/*
		 * Cache miss: release the LRU victim (last slot) if the cache
		 * is full, shift every entry down one, and read the new
		 * bitmap into slot 0.
		 */
		if (sb->u.ext2_sb.s_loaded_block_bitmaps < EXT2_MAX_GROUP_LOADED)
			sb->u.ext2_sb.s_loaded_block_bitmaps++;
		else
			brelse (sb->u.ext2_sb.s_block_bitmap[EXT2_MAX_GROUP_LOADED - 1]);
		for (j = sb->u.ext2_sb.s_loaded_block_bitmaps - 1; j > 0; j--) {
			sb->u.ext2_sb.s_block_bitmap_number[j] =
				sb->u.ext2_sb.s_block_bitmap_number[j - 1];
			sb->u.ext2_sb.s_block_bitmap[j] =
				sb->u.ext2_sb.s_block_bitmap[j - 1];
		}
		retval = read_block_bitmap (sb, block_group, 0);
	}
	/* On success retval is 0, i.e. slot 0 now holds this group. */
	return retval;
}
187 
188 /*
189  * Load the block bitmap for a given block group.  First of all do a couple
190  * of fast lookups for common cases and then pass the request onto the guts
191  * of the bitmap loader.
192  *
 * Return the slot number of the group in the superblock's bitmap cache on
 * success, or a -ve error code.
195  *
 * There is still one inconsistency here --- if the number of groups in this
 * filesystem is <= EXT2_MAX_GROUP_LOADED, then we have no way of
198  * differentiating between a group for which we have never performed a bitmap
199  * IO request, and a group for which the last bitmap read request failed.
200  */
static inline int load_block_bitmap (struct super_block * sb,
				     unsigned int block_group)
{
	int slot;

	/*
	 * Do the lookup for the slot.  First of all, check if we're asking
	 * for the same slot as last time, and did we succeed that last time?
	 */
	if (sb->u.ext2_sb.s_loaded_block_bitmaps > 0 &&
	    sb->u.ext2_sb.s_block_bitmap_number[0] == block_group &&
	    sb->u.ext2_sb.s_block_bitmap[0]) {
		return 0;
	}
	/*
	 * Or can we do a fast lookup based on a loaded group on a filesystem
	 * small enough to be mapped directly into the superblock?
	 */
	else if (sb->u.ext2_sb.s_groups_count <= EXT2_MAX_GROUP_LOADED &&
		 sb->u.ext2_sb.s_block_bitmap_number[block_group] == block_group &&
		 sb->u.ext2_sb.s_block_bitmap[block_group]) {
		slot = block_group;
	}
	/*
	 * If not, then do a full lookup for this block group.
	 */
	else {
		slot = __load_block_bitmap (sb, block_group);
	}

	/*
	 * <0 means we just got an error
	 */
	if (slot < 0)
		return slot;

	/*
	 * If it's a valid slot, we may still have cached a previous IO error,
	 * in which case the bh in the superblock cache will be zero.
	 */
	if (!sb->u.ext2_sb.s_block_bitmap[slot])
		return -EIO;

	/*
	 * Must have been read in OK to get this far.
	 */
	return slot;
}
249 
250 /* Free given blocks, update quota and i_blocks field */
void ext2_free_blocks (struct inode * inode, unsigned long block,
		       unsigned long count)
{
	struct buffer_head * bh;	/* block bitmap of the current group */
	struct buffer_head * bh2;	/* buffer holding the group descriptor */
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;		/* blocks spilling into the next group */
	struct super_block * sb;
	struct ext2_group_desc * gdp;
	struct ext2_super_block * es;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext2_free_blocks: nonexistent device");
		return;
	}
	/* lock_super serializes all bitmap/descriptor updates on this fs. */
	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	/* Reject ranges outside the data zone; the middle test also catches
	 * arithmetic wrap-around of block + count. */
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    (block + count) > le32_to_cpu(es->s_blocks_count)) {
		ext2_error (sb, "ext2_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = %lu, count = %lu", block, count);
		goto error_return;
	}

	ext2_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
	overflow = 0;
	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
		      EXT2_BLOCKS_PER_GROUP(sb);
	bit = (block - le32_to_cpu(es->s_first_data_block)) %
		      EXT2_BLOCKS_PER_GROUP(sb);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT2_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT2_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap (sb, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr];
	gdp = ext2_get_group_desc (sb, block_group, &bh2);
	if (!gdp)
		goto error_return;

	for (i = 0; i < count; i++, block++) {
		/* Never free the group's own metadata blocks. */
		if (block == le32_to_cpu(gdp->bg_block_bitmap) ||
		    block == le32_to_cpu(gdp->bg_inode_bitmap) ||
		    in_range(block, le32_to_cpu(gdp->bg_inode_table),
			     EXT2_SB(sb)->s_itb_per_group)) {
			ext2_error(sb, __FUNCTION__,
				   "Freeing block in system zone - block = %lu",
				   block);
			continue;
		}

		/* ext2_clear_bit returns the previous bit value; zero means
		 * the block was already free -- report the corruption and
		 * leave the counters alone. */
		if (!ext2_clear_bit (bit + i, bh->b_data))
			ext2_error(sb, __FUNCTION__,
				   "bit already cleared for block %lu", block);
		else {
			DQUOT_FREE_BLOCK(inode, 1);
			gdp->bg_free_blocks_count =
				cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)+1);
			es->s_free_blocks_count =
				cpu_to_le32(le32_to_cpu(es->s_free_blocks_count)+1);
		}
	}

	mark_buffer_dirty(bh2);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);

	mark_buffer_dirty(bh);
	/* On synchronous mounts, push the bitmap to disk before returning. */
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}
	if (overflow) {
		/* The loop advanced 'block' to the start of the next group;
		 * free the remainder there. */
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	unlock_super (sb);
	return;
}
346 
347 /*
348  * ext2_new_block uses a goal block to assist allocation.  If the goal is
349  * free, or there is a free block within 32 blocks of the goal, that block
350  * is allocated.  Otherwise a forward search is made for a free block; within
351  * each block group the search first looks for an entire free byte in the block
352  * bitmap, and then for any free bit if that fails.
353  * This function also updates quota and i_blocks field.
354  */
int ext2_new_block (struct inode * inode, unsigned long goal,
    u32 * prealloc_count, u32 * prealloc_block, int * err)
{
	struct buffer_head * bh;	/* bitmap of the group we allocate from */
	struct buffer_head * bh2;	/* buffer holding that group's descriptor */
	char * p, * r;
	int i, j, k, tmp;		/* i = group, j = bit within group */
	int bitmap_nr;
	struct super_block * sb;
	struct ext2_group_desc * gdp;
	struct ext2_super_block * es;
#ifdef EXT2FS_DEBUG
	static int goal_hits = 0, goal_attempts = 0;
#endif
	*err = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk ("ext2_new_block: nonexistent device");
		return 0;
	}

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	/*
	 * Only the reserved uid/gid, or a CAP_SYS_RESOURCE holder, may
	 * allocate out of the reserved block pool.
	 */
	if (le32_to_cpu(es->s_free_blocks_count) <= le32_to_cpu(es->s_r_blocks_count) &&
	    ((sb->u.ext2_sb.s_resuid != current->fsuid) &&
	     (sb->u.ext2_sb.s_resgid == 0 ||
	      !in_group_p (sb->u.ext2_sb.s_resgid)) &&
	     !capable(CAP_SYS_RESOURCE)))
		goto out;

	ext2_debug ("goal=%lu.\n", goal);

repeat:
	/*
	 * First, test whether the goal block is free.  An out-of-range goal
	 * is silently replaced by the first data block.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= le32_to_cpu(es->s_blocks_count))
		goal = le32_to_cpu(es->s_first_data_block);
	i = (goal - le32_to_cpu(es->s_first_data_block)) / EXT2_BLOCKS_PER_GROUP(sb);
	gdp = ext2_get_group_desc (sb, i, &bh2);
	if (!gdp)
		goto io_error;

	if (le16_to_cpu(gdp->bg_free_blocks_count) > 0) {
		j = ((goal - le32_to_cpu(es->s_first_data_block)) % EXT2_BLOCKS_PER_GROUP(sb));
#ifdef EXT2FS_DEBUG
		if (j)
			goal_attempts++;
#endif
		bitmap_nr = load_block_bitmap (sb, i);
		if (bitmap_nr < 0)
			goto io_error;

		bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr];

		ext2_debug ("goal is at %d:%d.\n", i, j);

		if (!ext2_test_bit(j, bh->b_data)) {
			ext2_debug("goal bit allocated, %d hits\n",++goal_hits);
			goto got_block;
		}
		if (j) {
			/*
			 * The goal was occupied; search forward for a free
			 * block within the next XX blocks.
			 *
			 * end_goal is more or less random, but it has to be
			 * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the
			 * next 64-bit boundary is simple..
			 */
			int end_goal = (j + 63) & ~63;
			j = ext2_find_next_zero_bit(bh->b_data, end_goal, j);
			if (j < end_goal)
				goto got_block;
		}

		ext2_debug ("Bit not found near goal\n");

		/*
		 * There has been no free block found in the near vicinity
		 * of the goal: do a search forward through the block groups,
		 * searching in each group first for an entire free byte in
		 * the bitmap and then for any free bit.
		 *
		 * Search first in the remainder of the current group; then,
		 * cyclically search through the rest of the groups.
		 */
		p = ((char *) bh->b_data) + (j >> 3);
		r = memscan(p, 0, (EXT2_BLOCKS_PER_GROUP(sb) - j + 7) >> 3);
		k = (r - ((char *) bh->b_data)) << 3;
		if (k < EXT2_BLOCKS_PER_GROUP(sb)) {
			j = k;
			goto search_back;
		}

		k = ext2_find_next_zero_bit ((unsigned long *) bh->b_data,
					EXT2_BLOCKS_PER_GROUP(sb),
					j);
		if (k < EXT2_BLOCKS_PER_GROUP(sb)) {
			j = k;
			goto got_block;
		}
	}

	ext2_debug ("Bit not found in block group %d.\n", i);

	/*
	 * Now search the rest of the groups.  We assume that
	 * i and gdp correctly point to the last group visited.
	 */
	for (k = 0; k < sb->u.ext2_sb.s_groups_count; k++) {
		i++;
		if (i >= sb->u.ext2_sb.s_groups_count)
			i = 0;
		gdp = ext2_get_group_desc (sb, i, &bh2);
		if (!gdp)
			goto io_error;
		if (le16_to_cpu(gdp->bg_free_blocks_count) > 0)
			break;
	}
	if (k >= sb->u.ext2_sb.s_groups_count)
		goto out;	/* every group is full: return ENOSPC */
	bitmap_nr = load_block_bitmap (sb, i);
	if (bitmap_nr < 0)
		goto io_error;

	bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr];
	/* Look for a wholly free byte first, then for any free bit. */
	r = memscan(bh->b_data, 0, EXT2_BLOCKS_PER_GROUP(sb) >> 3);
	j = (r - bh->b_data) << 3;
	if (j < EXT2_BLOCKS_PER_GROUP(sb))
		goto search_back;
	else
		j = ext2_find_first_zero_bit ((unsigned long *) bh->b_data,
					 EXT2_BLOCKS_PER_GROUP(sb));
	if (j >= EXT2_BLOCKS_PER_GROUP(sb)) {
		ext2_error (sb, "ext2_new_block",
			    "Free blocks count corrupted for block group %d", i);
		goto out;
	}

search_back:
	/*
	 * We have succeeded in finding a free byte in the block
	 * bitmap.  Now search backwards up to 7 bits to find the
	 * start of this group of free blocks.
	 */
	for (k = 0; k < 7 && j > 0 && !ext2_test_bit (j - 1, bh->b_data); k++, j--);

got_block:

	ext2_debug ("using block group %d(%d)\n", i, gdp->bg_free_blocks_count);

	/*
	 * Check quota for allocation of this block.
	 */
	if(DQUOT_ALLOC_BLOCK(inode, 1)) {
		*err = -EDQUOT;
		goto out;
	}

	/* Absolute block number of the candidate bit. */
	tmp = j + i * EXT2_BLOCKS_PER_GROUP(sb) + le32_to_cpu(es->s_first_data_block);

	if (tmp == le32_to_cpu(gdp->bg_block_bitmap) ||
	    tmp == le32_to_cpu(gdp->bg_inode_bitmap) ||
	    in_range (tmp, le32_to_cpu(gdp->bg_inode_table),
		      EXT2_SB(sb)->s_itb_per_group)) {
		ext2_error (sb, "ext2_new_block",
			    "Allocating block in system zone - block = %u",
			    tmp);
		/* Mark the bit used so it is never handed out, drop the
		 * quota charge, and restart the whole search. */
		ext2_set_bit(j, bh->b_data);
		DQUOT_FREE_BLOCK(inode, 1);
		goto repeat;
	}

	/* ext2_set_bit returns the old value; nonzero means someone beat us
	 * to this bit -- warn and retry. */
	if (ext2_set_bit (j, bh->b_data)) {
		ext2_warning (sb, "ext2_new_block",
			      "bit already set for block %d", j);
		DQUOT_FREE_BLOCK(inode, 1);
		goto repeat;
	}

	ext2_debug ("found bit %d\n", j);

	/*
	 * Do block preallocation now if required.
	 */
#ifdef EXT2_PREALLOCATE
	/* Writer: ->i_prealloc* */
	if (prealloc_count && !*prealloc_count) {
		int	prealloc_goal;
		unsigned long next_block = tmp + 1;

		prealloc_goal = es->s_prealloc_blocks ?
			es->s_prealloc_blocks : EXT2_DEFAULT_PREALLOC_BLOCKS;

		*prealloc_block = next_block;
		/* Writer: end */
		/* Grab up to prealloc_goal-1 contiguous blocks after tmp,
		 * stopping at the group boundary or the first taken bit. */
		for (k = 1;
		     k < prealloc_goal && (j + k) < EXT2_BLOCKS_PER_GROUP(sb);
		     k++, next_block++) {
			if (DQUOT_PREALLOC_BLOCK(inode, 1))
				break;
			/* Writer: ->i_prealloc* */
			if (*prealloc_block + *prealloc_count != next_block ||
			    ext2_set_bit (j + k, bh->b_data)) {
				/* Writer: end */
				DQUOT_FREE_BLOCK(inode, 1);
 				break;
			}
			(*prealloc_count)++;
			/* Writer: end */
		}
		/*
		 * As soon as we go for per-group spinlocks we'll need these
		 * done inside the loop above.
		 */
		gdp->bg_free_blocks_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) -
			       (k - 1));
		es->s_free_blocks_count =
			cpu_to_le32(le32_to_cpu(es->s_free_blocks_count) -
			       (k - 1));
		/* NOTE(review): %lu paired with an int argument (k - 1);
		 * only compiled in when EXT2FS_DEBUG is defined. */
		ext2_debug ("Preallocated a further %lu bits.\n",
			       (k - 1));
	}
#endif

	/* From here on j is the absolute block number being returned. */
	j = tmp;

	mark_buffer_dirty(bh);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}

	if (j >= le32_to_cpu(es->s_blocks_count)) {
		ext2_error (sb, "ext2_new_block",
			    "block(%d) >= blocks count(%d) - "
			    "block_group = %d, es == %p ",j,
			le32_to_cpu(es->s_blocks_count), i, es);
		goto out;
	}

	ext2_debug ("allocating block %d. "
		    "Goal hits %d of %d.\n", j, goal_hits, goal_attempts);

	gdp->bg_free_blocks_count = cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
	mark_buffer_dirty(bh2);
	es->s_free_blocks_count = cpu_to_le32(le32_to_cpu(es->s_free_blocks_count) - 1);
	mark_buffer_dirty(sb->u.ext2_sb.s_sbh);
	sb->s_dirt = 1;
	unlock_super (sb);
	*err = 0;
	return j;

io_error:
	*err = -EIO;
out:
	unlock_super (sb);
	return 0;

}
618 
/*
 * Return the number of free blocks in the filesystem.  In debug builds the
 * bitmaps are recounted and compared against the stored summaries; otherwise
 * the superblock's cached count is returned directly.
 */
unsigned long ext2_count_free_blocks (struct super_block * sb)
{
#ifdef EXT2FS_DEBUG
	struct ext2_super_block * es;
	unsigned long desc_count, bitmap_count, x;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	int i;

	lock_super (sb);
	es = sb->u.ext2_sb.s_es;
	desc_count = 0;		/* sum of the per-group descriptor counts */
	bitmap_count = 0;	/* sum of free bits actually seen in the bitmaps */
	gdp = NULL;
	for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) {
		gdp = ext2_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		bitmap_nr = load_block_bitmap (sb, i);
		if (bitmap_nr < 0)
			continue;

		x = ext2_count_free (sb->u.ext2_sb.s_block_bitmap[bitmap_nr],
				     sb->s_blocksize);
		printk ("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
	unlock_super (sb);
	return bitmap_count;
#else
	/* Non-debug build: trust the superblock's summary count. */
	return le32_to_cpu(sb->u.ext2_sb.s_es->s_free_blocks_count);
#endif
}
656 
/*
 * Test whether @block is marked in-use in @map (a group's block bitmap);
 * the bit index is the block's offset within its block group.
 */
static inline int block_in_use (unsigned long block,
				struct super_block * sb,
				unsigned char * map)
{
	return ext2_test_bit ((block - le32_to_cpu(sb->u.ext2_sb.s_es->s_first_data_block)) %
			 EXT2_BLOCKS_PER_GROUP(sb), map);
}
664 
/*
 * Return 1 if @num is a power of @base (base**0 == 1 counts), 0 otherwise.
 * By convention 0 also yields 1, so group 0 always gets a backup.
 */
static inline int test_root(int num, int base)
{
	int n = num;

	if (n == 0)
		return 1;
	/* Strip factors of base; a pure power reduces to exactly 1. */
	while (n % base == 0)
		n /= base;
	return n == 1;
}
677 
/*
 * A group is "sparse" (i.e. carries superblock/descriptor backups under the
 * sparse_super feature) iff its number is a power of 3, 5 or 7 -- with
 * groups 0 and 1 included via test_root's conventions.
 */
int ext2_group_sparse(int group)
{
	if (test_root(group, 3))
		return 1;
	if (test_root(group, 5))
		return 1;
	return test_root(group, 7);
}
683 
684 /**
685  *	ext2_bg_has_super - number of blocks used by the superblock in group
686  *	@sb: superblock for filesystem
687  *	@group: group number to check
688  *
689  *	Return the number of blocks used by the superblock (primary or backup)
690  *	in this group.  Currently this will be only 0 or 1.
691  */
ext2_bg_has_super(struct super_block * sb,int group)692 int ext2_bg_has_super(struct super_block *sb, int group)
693 {
694 	if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
695 	    !ext2_group_sparse(group))
696 		return 0;
697 	return 1;
698 }
699 
700 /**
701  *	ext2_bg_num_gdb - number of blocks used by the group table in group
702  *	@sb: superblock for filesystem
703  *	@group: group number to check
704  *
705  *	Return the number of blocks used by the group descriptor table
706  *	(primary or backup) in this group.  In the future there may be a
707  *	different number of descriptor blocks in each group.
708  */
ext2_bg_num_gdb(struct super_block * sb,int group)709 unsigned long ext2_bg_num_gdb(struct super_block *sb, int group)
710 {
711 	if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
712 	    !ext2_group_sparse(group))
713 		return 0;
714 	return EXT2_SB(sb)->s_gdb_count;
715 }
716 
717 #ifdef CONFIG_EXT2_CHECK
718 /* Called at mount-time, super-block is locked */
void ext2_check_blocks_bitmap (struct super_block * sb)
{
	struct buffer_head * bh;
	struct ext2_super_block * es;
	unsigned long desc_count, bitmap_count, x, j;
	unsigned long desc_blocks;
	int bitmap_nr;
	struct ext2_group_desc * gdp;
	int i;

	es = sb->u.ext2_sb.s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) {
		gdp = ext2_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		bitmap_nr = load_block_bitmap (sb, i);
		if (bitmap_nr < 0)
			continue;

		bh = EXT2_SB(sb)->s_block_bitmap[bitmap_nr];

		/* The (possibly sparse) superblock copy must be in use... */
		if (ext2_bg_has_super(sb, i) && !ext2_test_bit(0, bh->b_data))
			ext2_error(sb, __FUNCTION__,
				   "Superblock in group %d is marked free", i);

		/* ...as must the descriptor blocks that follow it. */
		desc_blocks = ext2_bg_num_gdb(sb, i);
		for (j = 0; j < desc_blocks; j++)
			if (!ext2_test_bit(j + 1, bh->b_data))
				ext2_error(sb, __FUNCTION__,
					   "Descriptor block #%ld in group "
					   "%d is marked free", j, i);

		if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap), sb, bh->b_data))
			ext2_error (sb, "ext2_check_blocks_bitmap",
				    "Block bitmap for group %d is marked free",
				    i);

		if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap), sb, bh->b_data))
			ext2_error (sb, "ext2_check_blocks_bitmap",
				    "Inode bitmap for group %d is marked free",
				    i);

		for (j = 0; j < sb->u.ext2_sb.s_itb_per_group; j++)
			if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j, sb, bh->b_data))
				ext2_error (sb, "ext2_check_blocks_bitmap",
					    "Block #%ld of the inode table in "
					    "group %d is marked free", j, i);

		/* Cross-check the descriptor's free count against the bitmap. */
		x = ext2_count_free (bh, sb->s_blocksize);
		if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
			ext2_error (sb, "ext2_check_blocks_bitmap",
				    "Wrong free blocks count for group %d, "
				    "stored = %d, counted = %lu", i,
				    le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
		ext2_error (sb, "ext2_check_blocks_bitmap",
			    "Wrong free blocks count in super block, "
			    "stored = %lu, counted = %lu",
			    (unsigned long) le32_to_cpu(es->s_free_blocks_count), bitmap_count);
}
785 #endif
786