// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/affs/bitmap.c
 *
 * (c) 1996 Hans-Joachim Widmaier
 *
 * bitmap.c contains the code that handles all bitmap related stuff -
 * block allocation, deallocation, calculation of free space.
 */
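/*
 * A note on the on-disk layout this file relies on (inferred from the code
 * below, not restated from a formal spec): each bitmap block starts with one
 * 32-bit checksum word, followed by data words in which a set bit marks a
 * free block and a cleared bit an allocated one. The checksum word is chosen
 * so that all words of the block sum to zero (see affs_checksum_block() and
 * affs_init_bitmap()), which is why every bit flip below is mirrored by
 * adding or subtracting the same mask in word 0. With 512-byte blocks a
 * bitmap block therefore covers 512 * 8 - 32 = 4064 blocks (s_bmap_bits).
 */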

#include <linux/slab.h>
#include "affs.h"

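/*
 * Sum up the cached per-bitmap-block free counts. The bm_free counters are
 * set up from the on-disk bitmap in affs_init_bitmap() (via memweight()) and
 * kept current by affs_alloc_block() and affs_free_block(), so no bitmap
 * block has to be read here.
 */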
u32
affs_count_free_blocks(struct super_block *sb)
{
	struct affs_bm_info *bm;
	u32 free;
	int i;

	pr_debug("%s()\n", __func__);

	if (sb_rdonly(sb))
		return 0;

	mutex_lock(&AFFS_SB(sb)->s_bmlock);

	bm = AFFS_SB(sb)->s_bitmap;
	free = 0;
	for (i = AFFS_SB(sb)->s_bmap_count; i > 0; bm++, i--)
		free += bm->bm_free;

	mutex_unlock(&AFFS_SB(sb)->s_bmlock);

	return free;
}

void
affs_free_block(struct super_block *sb, u32 block)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);
	struct affs_bm_info *bm;
	struct buffer_head *bh;
	u32 blk, bmap, bit, mask, tmp;
	__be32 *data;

	pr_debug("%s(%u)\n", __func__, block);

	if (block > sbi->s_partition_size)
		goto err_range;

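	/*
	 * Locate the bit for this block. Illustrative numbers, not taken
	 * from a real volume: with s_reserved = 2 and s_bmap_bits = 4064,
	 * block 5000 gives blk = 4998, so bmap = 1 (the second bitmap
	 * block) and bit = 934 within it; further down that becomes data
	 * word 934 / 32 + 1 = 30 of the buffer (word 0 is the checksum)
	 * and mask = 1 << (934 & 31) = 1 << 6.
	 */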
	blk = block - sbi->s_reserved;
	bmap = blk / sbi->s_bmap_bits;
	bit = blk % sbi->s_bmap_bits;
	bm = &sbi->s_bitmap[bmap];

	mutex_lock(&sbi->s_bmlock);

	bh = sbi->s_bmap_bh;
	if (sbi->s_last_bmap != bmap) {
		affs_brelse(bh);
		bh = affs_bread(sb, bm->bm_key);
		if (!bh)
			goto err_bh_read;
		sbi->s_bmap_bh = bh;
		sbi->s_last_bmap = bmap;
	}

	mask = 1 << (bit & 31);
	data = (__be32 *)bh->b_data + bit / 32 + 1;

	/* mark block free */
	tmp = be32_to_cpu(*data);
	if (tmp & mask)
		goto err_free;
	*data = cpu_to_be32(tmp | mask);

	/* fix checksum */
	tmp = be32_to_cpu(*(__be32 *)bh->b_data);
	*(__be32 *)bh->b_data = cpu_to_be32(tmp - mask);

	mark_buffer_dirty(bh);
	affs_mark_sb_dirty(sb);
	bm->bm_free++;

	mutex_unlock(&sbi->s_bmlock);
	return;

err_free:
	affs_warning(sb, "affs_free_block", "Trying to free block %u which is already free", block);
	mutex_unlock(&sbi->s_bmlock);
	return;

err_bh_read:
	affs_error(sb, "affs_free_block", "Cannot read bitmap block %u", bm->bm_key);
	sbi->s_bmap_bh = NULL;
	sbi->s_last_bmap = ~0;
	mutex_unlock(&sbi->s_bmlock);
	return;

err_range:
	affs_error(sb, "affs_free_block", "Block %u outside partition", block);
}

/*
 * Allocate a block in the given allocation zone.
 * Since we have to byte-swap the bitmap on little-endian
 * machines, this is rather expensive. Therefore we will
 * preallocate the run of free blocks that follows the
 * allocated one within the same bitmap word, if possible.
 * We are not doing preallocations in the header zone, though.
 */
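/*
 * A worked example of the scan below, with made-up numbers: suppose the word
 * at *data byte-swaps to tmp = 0x0000f000 and the goal points at bit 10, so
 * mask = 0xfffffc00. tmp & mask is non-zero, so we jump straight to find_bit:
 * ffs() picks bit 12, that block is returned, and the loop after it
 * preallocates the neighbouring free bits 13-15 (i_pa_cnt = 3). The word is
 * rewritten as tmp & ~0xf000 and the checksum word grows by 0xf000. As the
 * loop is written, such a run may extend up to the end of the 32-bit word.
 */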

u32
affs_alloc_block(struct inode *inode, u32 goal)
{
	struct super_block *sb;
	struct affs_sb_info *sbi;
	struct affs_bm_info *bm;
	struct buffer_head *bh;
	__be32 *data, *enddata;
	u32 blk, bmap, bit, mask, mask2, tmp;
	int i;

	sb = inode->i_sb;
	sbi = AFFS_SB(sb);

	pr_debug("balloc(inode=%lu,goal=%u): ", inode->i_ino, goal);

	if (AFFS_I(inode)->i_pa_cnt) {
		pr_debug("%d\n", AFFS_I(inode)->i_lastalloc+1);
		AFFS_I(inode)->i_pa_cnt--;
		return ++AFFS_I(inode)->i_lastalloc;
	}

	if (!goal || goal > sbi->s_partition_size) {
		if (goal)
			affs_warning(sb, "affs_balloc", "invalid goal %d", goal);
		//if (!AFFS_I(inode)->i_last_block)
		//	affs_warning(sb, "affs_balloc", "no last alloc block");
		goal = sbi->s_reserved;
	}

	blk = goal - sbi->s_reserved;
	bmap = blk / sbi->s_bmap_bits;
	bm = &sbi->s_bitmap[bmap];

	mutex_lock(&sbi->s_bmlock);

	if (bm->bm_free)
		goto find_bmap_bit;

find_bmap:
	/* search for the next bmap buffer with free bits */
	i = sbi->s_bmap_count;
	do {
		if (--i < 0)
			goto err_full;
		bmap++;
		bm++;
		if (bmap < sbi->s_bmap_count)
			continue;
		/* restart search at zero */
		bmap = 0;
		bm = sbi->s_bitmap;
	} while (!bm->bm_free);
	blk = bmap * sbi->s_bmap_bits;

find_bmap_bit:

	bh = sbi->s_bmap_bh;
	if (sbi->s_last_bmap != bmap) {
		affs_brelse(bh);
		bh = affs_bread(sb, bm->bm_key);
		if (!bh)
			goto err_bh_read;
		sbi->s_bmap_bh = bh;
		sbi->s_last_bmap = bmap;
	}

	/* find an unused block in this bitmap block */
	bit = blk % sbi->s_bmap_bits;
	data = (__be32 *)bh->b_data + bit / 32 + 1;
	enddata = (__be32 *)((u8 *)bh->b_data + sb->s_blocksize);
	mask = ~0UL << (bit & 31);
	blk &= ~31UL;

	tmp = be32_to_cpu(*data);
	if (tmp & mask)
		goto find_bit;

	/* scan the rest of the buffer */
	do {
		blk += 32;
		if (++data >= enddata)
			/* didn't find anything, which can only happen
			 * if the scan didn't start at 0; try the next bmap
			 */
			goto find_bmap;
	} while (!*data);
	tmp = be32_to_cpu(*data);
	mask = ~0;

find_bit:
	/* finally look for a free bit in the word */
	bit = ffs(tmp & mask) - 1;
	blk += bit + sbi->s_reserved;
	mask2 = mask = 1 << (bit & 31);
	AFFS_I(inode)->i_lastalloc = blk;

	/* prealloc as much as possible within this word */
	while ((mask2 <<= 1)) {
		if (!(tmp & mask2))
			break;
		AFFS_I(inode)->i_pa_cnt++;
		mask |= mask2;
	}
	bm->bm_free -= AFFS_I(inode)->i_pa_cnt + 1;

	*data = cpu_to_be32(tmp & ~mask);

	/* fix checksum */
	tmp = be32_to_cpu(*(__be32 *)bh->b_data);
	*(__be32 *)bh->b_data = cpu_to_be32(tmp + mask);

	mark_buffer_dirty(bh);
	affs_mark_sb_dirty(sb);

	mutex_unlock(&sbi->s_bmlock);

	pr_debug("%d\n", blk);
	return blk;

err_bh_read:
	affs_error(sb, "affs_balloc", "Cannot read bitmap block %u", bm->bm_key);
	sbi->s_bmap_bh = NULL;
	sbi->s_last_bmap = ~0;
err_full:
	mutex_unlock(&sbi->s_bmlock);
	pr_debug("failed\n");
	return 0;
}

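/*
 * Read the bitmap pointers from the root block and build the in-core
 * s_bitmap[] array. Judging by the offsets used below (and the classic
 * Amiga FFS layout), the root block tail holds a bm_flag validity word,
 * 25 bitmap block pointers starting at word s_blocksize / 4 - 49, and a
 * pointer to a bitmap extension block right after them; each extension
 * block holds s_blocksize / 4 - 1 further pointers followed by the block
 * number of the next extension.
 */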
int affs_init_bitmap(struct super_block *sb, int *flags)
{
	struct affs_bm_info *bm;
	struct buffer_head *bmap_bh = NULL, *bh = NULL;
	__be32 *bmap_blk;
	u32 size, blk, end, offset, mask;
	int i, res = 0;
	struct affs_sb_info *sbi = AFFS_SB(sb);

	if (*flags & SB_RDONLY)
		return 0;

	if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
		pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
		*flags |= SB_RDONLY;
		return 0;
	}

	sbi->s_last_bmap = ~0;
	sbi->s_bmap_bh = NULL;
	sbi->s_bmap_bits = sb->s_blocksize * 8 - 32;
	sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
			     sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
	size = sbi->s_bmap_count * sizeof(*bm);
	bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
	if (!sbi->s_bitmap) {
		pr_err("Bitmap allocation failed\n");
		return -ENOMEM;
	}

	bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
	blk = sb->s_blocksize / 4 - 49;
	end = blk + 25;

	for (i = sbi->s_bmap_count; i > 0; bm++, i--) {
		affs_brelse(bh);

		bm->bm_key = be32_to_cpu(bmap_blk[blk]);
		bh = affs_bread(sb, bm->bm_key);
		if (!bh) {
			pr_err("Cannot read bitmap\n");
			res = -EIO;
			goto out;
		}
		if (affs_checksum_block(sb, bh)) {
			pr_warn("Bitmap %u invalid - mounting %s read only.\n",
				bm->bm_key, sb->s_id);
			*flags |= SB_RDONLY;
			goto out;
		}
		pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
		bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);

		/* Don't try to read the extension if this is the last block,
		 * but we also need the right bm pointer below
		 */
		if (++blk < end || i == 1)
			continue;
		if (bmap_bh)
			affs_brelse(bmap_bh);
		bmap_bh = affs_bread(sb, be32_to_cpu(bmap_blk[blk]));
		if (!bmap_bh) {
			pr_err("Cannot read bitmap extension\n");
			res = -EIO;
			goto out;
		}
		bmap_blk = (__be32 *)bmap_bh->b_data;
		blk = 0;
		end = sb->s_blocksize / 4 - 1;
	}

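	/*
	 * Bits in the last bitmap block that lie beyond the end of the
	 * partition are forced to "allocated" and the trailing words are
	 * cleared, so they can never be handed out. Illustrative numbers:
	 * with s_bmap_bits = 4064 and 10000 usable blocks, offset starts
	 * as 10000 % 4064 = 1872, so mask = ~(0xFFFFFFFFU << 16) =
	 * 0x0000ffff and the partial word is b_data word 1872 / 32 + 1 =
	 * 59; only its low 16 bits survive, and words 60 onwards are
	 * zeroed before the checksum is recomputed.
	 */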
	offset = (sbi->s_partition_size - sbi->s_reserved) % sbi->s_bmap_bits;
	mask = ~(0xFFFFFFFFU << (offset & 31));
	pr_debug("last word: %d %d %d\n", offset, offset / 32 + 1, mask);
	offset = offset / 32 + 1;

	if (mask) {
		u32 old, new;

		/* Mark unused bits in the last word as allocated */
		old = be32_to_cpu(((__be32 *)bh->b_data)[offset]);
		new = old & mask;
		//if (old != new) {
			((__be32 *)bh->b_data)[offset] = cpu_to_be32(new);
			/* fix checksum */
			//new -= old;
			//old = be32_to_cpu(*(__be32 *)bh->b_data);
			//*(__be32 *)bh->b_data = cpu_to_be32(old - new);
			//mark_buffer_dirty(bh);
		//}
		/* correct offset for the bitmap count below */
		//offset++;
	}
	while (++offset < sb->s_blocksize / 4)
		((__be32 *)bh->b_data)[offset] = 0;
	((__be32 *)bh->b_data)[0] = 0;
	((__be32 *)bh->b_data)[0] = cpu_to_be32(-affs_checksum_block(sb, bh));
	mark_buffer_dirty(bh);

	/* recalculate bitmap count for last block */
	bm--;
	bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);

out:
	affs_brelse(bh);
	affs_brelse(bmap_bh);
	return res;
}

void affs_free_bitmap(struct super_block *sb)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);

	if (!sbi->s_bitmap)
		return;

	affs_brelse(sbi->s_bmap_bh);
	sbi->s_bmap_bh = NULL;
	sbi->s_last_bmap = ~0;
	kfree(sbi->s_bitmap);
	sbi->s_bitmap = NULL;
}