/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/quotaops.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#undef UFS_BALLOC_DEBUG

#ifdef UFS_BALLOC_DEBUG
#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif

unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else ufs_error (sb, "ufs_free_fragments",
			"bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);


	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
	fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
		fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside device");
		goto failed;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
		fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}


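/*
 * NULLIFY_FRAGMENTS zeroes the newly allocated fragments oldcount..newcount-1
 * of the run starting at 'result', marks the buffers uptodate and dirty, and
 * writes them out immediately when the inode requires synchronous updates.
 */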
#define NULLIFY_FRAGMENTS \
	for (i = oldcount; i < newcount; i++) { \
		bh = sb_getblk(sb, result + i); \
		memset (bh->b_data, 0, sb->s_blocksize); \
		mark_buffer_uptodate(bh, 1); \
		mark_buffer_dirty (bh); \
		if (IS_SYNC(inode)) { \
			ll_rw_block (WRITE, 1, &bh); \
			wait_on_buffer (bh); \
		} \
		brelse (bh); \
	}

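/*
 * Allocate 'count' fragments for fragment number 'fragment' of the inode.
 * '*p' is the on-disk block pointer being filled in and 'goal' is the
 * preferred fragment number (0 means no preference).  The request is
 * satisfied by allocating a fresh run of fragments, by extending the
 * fragments already behind '*p' in place, or by allocating a new area and
 * moving the old data there.  Returns the fragment number of the
 * allocation, or 0 if nothing could be allocated; '*err' is cleared on
 * success.
 */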
unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
	unsigned goal, unsigned count, int * err )
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct buffer_head * bh;
	unsigned cgno, oldcount, newcount, tmp, request, i, result;

	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning, "
			"fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				"fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < inode->u.ufs_i.i_lastfrag) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough free space on the device for a non-privileged user
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD(("EXIT (FAILED)\n"))
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			inode->i_blocks += count << uspi->s_nspfshift;
			inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
			NULLIFY_FRAGMENTS
		}
		unlock_super(sb);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		inode->i_blocks += count << uspi->s_nspfshift;
		inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
		NULLIFY_FRAGMENTS
		unlock_super(sb);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		for (i = 0; i < oldcount; i++) {
			bh = sb_bread(sb, tmp + i);
			if(bh)
			{
				mark_buffer_clean (bh);
				bh->b_blocknr = result + i;
				mark_buffer_dirty (bh);
				if (IS_SYNC(inode)) {
					ll_rw_block (WRITE, 1, &bh);
					wait_on_buffer (bh);
				}
				brelse (bh);
			}
			else
			{
				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
				unlock_super(sb);
				return 0;
			}
		}
		*p = cpu_to_fs32(sb, result);
		*err = 0;
		inode->i_blocks += count << uspi->s_nspfshift;
		inode->u.ufs_i.i_lastfrag = max_t(u32, inode->u.ufs_i.i_lastfrag, fragment + count);
		NULLIFY_FRAGMENTS
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	unlock_super(sb);
	UFSD(("EXIT (FAILED)\n"))
	return 0;
}

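/*
 * Try to extend, in place, the run of fragments at 'fragment' from
 * 'oldcount' to 'newcount' fragments.  Returns 'fragment' on success, or
 * 0 if the following fragments are not free or the quota allocation
 * fails (in which case '*err' is set to -EDQUOT).
 */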
unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
	unsigned oldcount, unsigned newcount, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first (USPI_UBH);
	count = newcount - oldcount;

	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, sb->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, CURRENT_TIME);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	UFSD(("EXIT, fragment %u\n", fragment))

	return fragment;
}

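/*
 * UFS_TEST_FREE_SPACE_CG jumps to 'cg_found' when cylinder group 'cgno'
 * has a free block, or when its fragment summary shows a free run of at
 * least 'count' fragments.
 */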
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

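/*
 * Allocate 'count' fragments, preferably near fragment 'goal' in cylinder
 * group 'cgno'.  If that group has no suitable space, other groups are
 * probed by quadratic rehash and then linearly.  Returns the first
 * fragment of the allocated run, or 0 if nothing could be allocated.
 */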
unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
	unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (cg 'oldcg' was checked in step 1, 'oldcg + 1' in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD(("EXIT (FAILED)\n"))
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, CURRENT_TIME);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
		fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
	fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD(("EXIT3, result %u\n", result))
	return result;
}

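/*
 * Allocate a whole block in the cylinder group described by 'ucpi',
 * starting the search at fragment 'goal' (or at the group's block rotor
 * when 'goal' is 0).  Returns the first fragment of the block, or
 * (unsigned)-1 on failure.
 */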
unsigned ufs_alloccg_block (struct inode * inode,
	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD(("ENTER, goal %u\n", goal))

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);
	ucg = ubh_get_ucg(UCPI_UBH);

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
	if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
	fs32_sub(sb, &sb->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD(("EXIT, result %u\n", result))

	return result;
}

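/*
 * Scan the free fragment bitmap of a cylinder group for a run of 'count'
 * free fragments, starting near 'goal' (or at the group's fragment rotor
 * when 'goal' is 0).  Returns the fragment offset of the run within the
 * group, or (unsigned)-1 if no such run exists.
 */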
unsigned ufs_bitmap_search (struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned start, length, location, result;
	unsigned position, fragsize, blockmap, mask;

	UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))

	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first (USPI_UBH);
	ucg = ubh_get_ucg(UCPI_UBH);

	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (location == 0) {
		length = start + 1;
		location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
			1 << (count - 1 + (uspi->s_fpb & 7)));
		if (location == 0) {
			ufs_error (sb, "ufs_bitmap_search",
			"bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
			ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	result = (start + length - location) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
	fragsize = 0;
	for (position = 0, mask = 1; position < 8; position++, mask <<= 1) {
		if (blockmap & mask) {
			if (!(position & uspi->s_fpbmask))
				fragsize = 1;
			else
				fragsize++;
		}
		else {
			if (fragsize == count) {
				result += position - count;
				UFSD(("EXIT, result %u\n", result))
				return result;
			}
			fragsize = 0;
		}
	}
	if (fragsize == count) {
		result += position - count;
		UFSD(("EXIT, result %u\n", result))
		return result;
	}
	ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
	UFSD(("EXIT (FAILED)\n"))
	return (unsigned)-1;
}

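/*
 * Update the cluster bitmap and cluster summary counts of the cylinder
 * group after block 'blkno' has been freed (cnt > 0) or allocated
 * (cnt < 0).  Does nothing unless the filesystem keeps cluster summary
 * information (uspi->s_contigsumsize > 0).
 */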
void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = sb->u.ufs_sb.s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (u32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
}


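/*
 * Fragment allocation tables, taken from BSD FFS.  ufs_fragtable_8fpb is
 * used when a block holds 8 fragments, ufs_fragtable_other otherwise.
 * Each entry describes, for one byte of the free fragment bitmap, which
 * sizes of free fragment runs occur in that byte; ufs_bitmap_search()
 * scans the bitmap through these tables to find a run of the wanted size.
 */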
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};