/*
 *  linux/fs/hpfs/alloc.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  HPFS bitmap operations
 */

#include "hpfs_fn.h"

/*
 * Check if a sector is allocated in the bitmap
 * This is really slow. Turned on only if chk==2
 */

static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
		goto fail1;
	}
	hpfs_brelse4(&qbh);
	if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
		if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
			hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
			goto fail1;
		}
		hpfs_brelse4(&qbh);
	}
	return 0;
	fail1:
	hpfs_brelse4(&qbh);
	fail:
	return 1;
}
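
/*
 * Bitmap addressing used throughout this file: each allocation bitmap
 * covers 0x4000 sectors, so sec >> 14 selects the bitmap, and within it
 * (sec & 0x3fff) >> 5 is the 32-bit word and sec & 0x1f the bit.  A set
 * bit means the sector is free, a clear bit means it is allocated.
 * Worked example: sec == 0x12345 lands in bitmap 4, word 0x11a, bit 5.
 */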

/*
 * Check that sector(s) have a valid number and, additionally, that they
 * are allocated in the bitmap.
 */

int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
{
	if (start + len < start || start < 0x12 ||
	    start + len > hpfs_sb(s)->sb_fs_size) {
		hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
		return 1;
	}
	if (hpfs_sb(s)->sb_chk >= 2) {
		int i;
		for (i = 0; i < len; i++)
			if (chk_if_allocated(s, start + i, msg)) return 1;
	}
	return 0;
}
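
/*
 * The start < 0x12 test rejects the reserved area at the beginning of
 * the volume (presumably the boot block plus the superblock at 0x10 and
 * the spare block at 0x11), which must never be treated as ordinary
 * data sectors.
 */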

static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned bs = near & ~0x3fff;
	unsigned nr = (near & 0x3fff) & ~(n - 1);
	/*unsigned mnr;*/
	unsigned i, q;
	int a, b;
	secno ret = 0;
	if (n != 1 && n != 4) {
		hpfs_error(s, "Bad allocation size: %d", n);
		return 0;
	}
	if (bs != ~0x3fff) {
		if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
	} else {
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
	}
	if (!tstbits(bmp, nr, n + forward)) {
		ret = bs + nr;
		goto rt;
	}
	q = nr + n; b = 0;
	while ((a = tstbits(bmp, q, n + forward)) != 0) {
		q += a;
		if (n != 1) q = ((q-1)&~(n-1))+n;
		if (!b) {
			if (q>>5 != nr>>5) {
				b = 1;
				q = nr & 0x1f;
			}
		} else if (q > nr) break;
	}
	if (!a) {
		ret = bs + q;
		goto rt;
	}
	nr >>= 5;
	/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
	i = nr;
	do {
		if (!le32_to_cpu(bmp[i])) goto cont;
		if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
		q = i<<5;
		if (i > 0) {
			unsigned k = le32_to_cpu(bmp[i-1]);
			while (k & 0x80000000) {
				q--; k <<= 1;
			}
		}
		if (n != 1) q = ((q-1)&~(n-1))+n;
		while ((a = tstbits(bmp, q, n + forward)) != 0) {
			q += a;
			if (n != 1) q = ((q-1)&~(n-1))+n;
			if (q>>5 > i) break;
		}
		if (!a) {
			ret = bs + q;
			goto rt;
		}
		cont:
		i++, i &= 0x1ff;
	} while (i != nr);
	rt:
	if (ret) {
		if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
			hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
			ret = 0;
			goto b;
		}
		bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
	}
	b:
	hpfs_brelse4(&qbh);
	uls:
	return ret;
}
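
/*
 * Note on the rounding above: q = ((q - 1) & ~(n - 1)) + n rounds q up
 * to the next multiple of n, e.g. for n == 4 it maps 5 -> 8, 8 -> 8 and
 * 9 -> 12, so dnode allocations stay 4-sector aligned.  The search asks
 * tstbits() for n + forward consecutive free bits, i.e. it only accepts
 * a spot that also has room for the requested forward preallocation
 * immediately after it.
 */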

/*
 * Allocation strategy:	1) search place near the sector specified
 *			2) search bitmap where free sectors last found
 *			3) search all bitmaps
 *			4) search all bitmaps ignoring number of pre-allocated
 *				sectors
 */

secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
{
	secno sec;
	int i;
	unsigned n_bmps;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	int f_p = 0;
	int near_bmp;
	if (forward < 0) {
		forward = -forward;
		f_p = 1;
	}
	n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
	if (near && near < sbi->sb_fs_size) {
		if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
		near_bmp = near >> 14;
	} else near_bmp = n_bmps / 2;
	/*
	if (b != -1) {
		if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
			b &= 0x0fffffff;
			goto ret;
		}
		if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
	*/
	if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
	less_fwd:
	for (i = 0; i < n_bmps; i++) {
		if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
			sbi->sb_c_bitmap = near_bmp+i;
			goto ret;
		}
		if (!forward) {
			if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp-i-1;
				goto ret;
			}
		} else {
			if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp+i-n_bmps;
				goto ret;
			}
		}
		if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
			goto ret;
		}
	}
	if (!f_p) {
		if (forward) {
			sbi->sb_max_fwd_alloc = forward * 3 / 4;
			forward /= 2;
			goto less_fwd;
		}
	}
	sec = 0;
	ret:
	if (sec && f_p) {
		for (i = 0; i < forward; i++) {
			if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
				sec = 0;
				break;
			}
		}
	}
	return sec;
}
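
/*
 * The sign of the forward argument selects the preallocation mode:
 * forward >= 0 is only a hint (it is scaled down for the "near" try,
 * capped by sb_max_fwd_alloc and halved on each retry), while
 * forward < 0 (f_p) means the caller insists on -forward sectors
 * directly after the returned one, which are then grabbed with
 * hpfs_alloc_if_possible().
 */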

static secno alloc_in_dirband(struct super_block *s, secno near)
{
	unsigned nr = near;
	secno sec;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (nr < sbi->sb_dirband_start)
		nr = sbi->sb_dirband_start;
	if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
		nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
	nr -= sbi->sb_dirband_start;
	nr >>= 2;
	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
	if (!sec) return 0;
	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}
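
/*
 * The directory band bitmap has one bit per dnode (4 sectors), so the
 * band-relative offset is divided by 4 before searching.  Passing
 * (~0x3fff) | nr as "near" makes bs == ~0x3fff in alloc_in_bmp(), which
 * is the cue to use the dnode bitmap instead of an ordinary one; the
 * returned bit index is mapped back to a sector number as
 * sb_dirband_start + 4 * bit.
 */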

/* Alloc sector if it's free */

int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
	if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
		bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		return 1;
	}
	hpfs_brelse4(&qbh);
	end:
	return 0;
}
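
/*
 * This is effectively "test the free bit and clear it": it returns 1
 * only if the sector was free and has now been marked allocated, which
 * is what the preallocation loop in hpfs_alloc_sector() relies on.
 */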

/* Free sectors in bitmaps */

void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	/*printk("2 - ");*/
	if (!n) return;
	if (sec < 0x12) {
		hpfs_error(s, "Trying to free reserved sector %08x", sec);
		return;
	}
	sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
	if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
	new_map:
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
		return;
	}
	new_tst:
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
		hpfs_error(s, "sector %08x not allocated", sec);
		hpfs_brelse4(&qbh);
		return;
	}
	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
	if (!--n) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		return;
	}
	if (!(++sec & 0x3fff)) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		goto new_map;
	}
	goto new_tst;
}
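
/*
 * Freeing walks the run sector by sector, setting the free bit for
 * each; when ++sec crosses a 0x4000-sector boundary the current bitmap
 * is marked dirty and released and the next one is mapped (new_map).
 * sb_max_fwd_alloc is bumped first (capped at 0xffffff), presumably
 * because freed space makes generous forward preallocation worthwhile
 * again.
 */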

/*
 * Check that there are at least n free dnodes on the filesystem.
 * Called before adding to a dnode; if we ran out of space while
 * splitting dnodes, the dnode tree would get corrupted.
 */

int hpfs_check_free_dnodes(struct super_block *s, int n)
{
	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
	int i, j;
	__le32 *bmp;
	struct quad_buffer_head qbh;
	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
		for (j = 0; j < 512; j++) {
			unsigned k;
			if (!le32_to_cpu(bmp[j])) continue;
			for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
				hpfs_brelse4(&qbh);
				return 0;
			}
		}
	}
	hpfs_brelse4(&qbh);
	i = 0;
	if (hpfs_sb(s)->sb_c_bitmap != -1) {
		bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
		goto chk_bmp;
	}
	chk_next:
	if (i == b) i++;
	if (i >= n_bmps) return 1;
	bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
	chk_bmp:
	if (bmp) {
		for (j = 0; j < 512; j++) {
			u32 k;
			if (!le32_to_cpu(bmp[j])) continue;
			for (k = 0xf; k; k <<= 4)
				if ((le32_to_cpu(bmp[j]) & k) == k) {
					if (!--n) {
						hpfs_brelse4(&qbh);
						return 0;
					}
				}
		}
		hpfs_brelse4(&qbh);
	}
	i++;
	goto chk_next;
}
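
/*
 * Counting here: every set bit in the dnode bitmap is one free dnode
 * slot in the directory band, and in the ordinary bitmaps each 32-bit
 * word is scanned one nibble at a time (the 0xf mask shifted left by
 * 4), a fully set nibble meaning four aligned free sectors, i.e. room
 * for one more dnode outside the band.  Returns 0 once n slots have
 * been found, 1 if every bitmap was scanned without finding enough.
 */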

void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
{
	if (hpfs_sb(s)->sb_chk) if (dno & 3) {
		hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
		return;
	}
	if (dno < hpfs_sb(s)->sb_dirband_start ||
	    dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		hpfs_free_sectors(s, dno, 4);
	} else {
		struct quad_buffer_head qbh;
		__le32 *bmp;
		unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
			return;
		}
		bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
	}
}
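
/*
 * Mirrors alloc_in_dirband(): inside the directory band a dnode is
 * represented by a single bit at index (dno - sb_dirband_start) / 4 in
 * the dnode bitmap, so freeing just sets that bit back; outside the
 * band the four sectors are returned to the regular bitmaps.
 */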

struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
			 dnode_secno *dno, struct quad_buffer_head *qbh)
{
	struct dnode *d;
	if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
		if (!(*dno = alloc_in_dirband(s, near)))
			if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
	} else {
		if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
			if (!(*dno = alloc_in_dirband(s, near))) return NULL;
	}
	if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
		hpfs_free_dnode(s, *dno);
		return NULL;
	}
	memset(d, 0, 2048);
	d->magic = cpu_to_le32(DNODE_MAGIC);
	d->first_free = cpu_to_le32(52);
	d->dirent[0] = 32;
	d->dirent[2] = 8;
	d->dirent[30] = 1;
	d->dirent[31] = 255;
	d->self = cpu_to_le32(*dno);
	return d;
}
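
/*
 * The magic numbers above hand-build the terminating dirent without
 * going through struct hpfs_dirent: first_free == 52 is the 20-byte
 * dnode header plus one 32-byte dirent, dirent[0] == 32 is that
 * dirent's length, dirent[2] == 8 presumably sets its "last in dnode"
 * flag, and dirent[30] == 1 with dirent[31] == 255 should be a one-byte
 * name "\377" that sorts after every real name.
 */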

struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
			  struct buffer_head **bh)
{
	struct fnode *f;
	if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
	if (!(f = hpfs_get_sector(s, *fno, bh))) {
		hpfs_free_sectors(s, *fno, 1);
		return NULL;
	}
	memset(f, 0, 512);
	f->magic = cpu_to_le32(FNODE_MAGIC);
	f->ea_offs = cpu_to_le16(0xc4);
	f->btree.n_free_nodes = 8;
	f->btree.first_free = cpu_to_le16(8);
	return f;
}
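
/*
 * ea_offs == 0xc4 marks where extended attributes would start inside
 * the 512-byte fnode, and the freshly zeroed B+tree gets room for 8
 * leaf entries (n_free_nodes) with first_free == 8, i.e. just past the
 * 8-byte bplus header, assuming the usual HPFS fnode layout.
 */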

struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
			  struct buffer_head **bh)
{
	struct anode *a;
	if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
	if (!(a = hpfs_get_sector(s, *ano, bh))) {
		hpfs_free_sectors(s, *ano, 1);
		return NULL;
	}
	memset(a, 0, 512);
	a->magic = cpu_to_le32(ANODE_MAGIC);
	a->self = cpu_to_le32(*ano);
	a->btree.n_free_nodes = 40;
	a->btree.n_used_nodes = 0;
	a->btree.first_free = cpu_to_le16(8);
	return a;
}
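
/*
 * Same idea as for the fnode above, but an anode's whole 512-byte
 * sector (minus its small header) belongs to the B+tree; assuming
 * 12-byte leaf entries, 40 of them take 480 of those 512 bytes, which
 * would explain n_free_nodes == 40 here.
 */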