/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

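/*
 * Overview of the allocation paths in this file:
 * ufs_new_fragments() either grows a partially allocated block in
 * place via ufs_add_fragments(), or gets fresh space from
 * ufs_alloc_fragments(), which picks a cylinder group and then uses
 * ufs_alloccg_block() for whole blocks and ufs_bitmap_search() to
 * find a run of free fragments.  The search helpers return INVBLOCK
 * on failure.
 */
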
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments into a whole free block:
	 * if every fragment of the block is now free, move the block from
	 * the free-fragment counters (cs_nffree) to the free-block
	 * counters (cs_nbfree).
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

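	/*
	 * The range may span several cylinder groups; handle one group
	 * per pass.  'overflow' holds the number of fragments that fall
	 * beyond the current group and are freed on the next iteration.
	 */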
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache in the following way:
 * we have buffers with b_blocknr equal to oldb...oldb+count-1 and
 * want buffers with b_blocknr equal to newb...newb+count-1 instead;
 * we also assume that blocks oldb...oldb+count-1 are situated at the
 * end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
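
	/*
	 * Walk one page per iteration: since mask == blks_per_page - 1,
	 * (i | mask) + 1 advances i to the first block of the next page.
	 */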
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

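	/* Zero each newly allocated fragment through the buffer cache. */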
	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize the block in place
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate a new block and move the data
	 */
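	/*
	 * fs_optim selects the allocation strategy: UFS_OPTSPACE requests
	 * exactly the fragments needed (newcount), while UFS_OPTTIME
	 * requests a whole block (s_fpb) and gives the unused tail back
	 * below, trading some space for fewer future copies.
	 */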
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

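/*
 * Try to extend an allocation in place from 'oldcount' to 'newcount'
 * fragments, i.e. grow the tail of a file without copying.  Returns
 * 'fragment' on success and 0 if the fragments following the current
 * ones are not free (or the cylinder group cannot satisfy the request).
 */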
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

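/*
 * Jump to cg_found if cylinder group 'cgno' can satisfy the request:
 * either it has a whole free block, or cg_frsum records a free
 * fragment run of at least 'count' fragments.
 */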
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at an offset of 2 from oldcg (offset 0 was checked in
	 * step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

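	/*
	 * No free run of count..s_fpb-1 fragments is recorded in
	 * cg_frsum: take a whole block and return its unused tail to the
	 * free-fragment pool.
	 */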
	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

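/*
 * Allocate one whole block in the given cylinder group: use the block
 * containing 'goal' if it is free; otherwise search the bitmap for a
 * free block near 'goal' (or at the group's rotor when no goal is
 * given).  Returns the fragment number of the block, or INVBLOCK if
 * the group has none.
 */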
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

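/*
 * Scan 'size' bytes of the bitmap at byte offset 'begin', crossing
 * buffer_head boundaries as needed, for a byte whose table entry has
 * a bit of 'mask' set.  Returns the number of bytes left unscanned
 * when such a byte is found (counting the matching byte), or 0 if
 * none matched; the caller turns this into a position.
 */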
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
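
	/*
	 * Example: for count == 3, mask_arr[3] == 0x1f and
	 * want_arr[3] == 0xe (binary 01110), so after the map is shifted
	 * left by one the window matches exactly three free fragments
	 * bounded by allocated fragments (or the map edge) on both sides.
	 */
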
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

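/*
 * Maintain the cluster bitmap and the cluster-size summary counts for
 * the 44BSD cluster allocator: cnt > 0 marks 'blkno' free and cnt < 0
 * marks it allocated, then the counts of the old and the new
 * neighbouring free clusters are updated (capped at s_contigsumsize).
 */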
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

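/*
 * Fragment lookup tables, indexed by one byte of a block's free map.
 * In ufs_fragtable_8fpb (8 fragments per block), bit k-1 of an entry
 * is set when the byte contains a run of exactly k free fragments;
 * ubh_scanc() masks with 1 << (count - 1) to find a byte holding a
 * run of the wanted length.  ufs_fragtable_other plays the same role
 * for the 1-, 2- and 4-fragments-per-block layouts, with the run bits
 * for each layout offset by s_fpb, as the mask expression
 * 1 << (count - 1 + (uspi->s_fpb & 7)) above suggests.
 */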
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};