#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "omfs.h"

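/*
 * Counts the number of free blocks by summing the clear bits across the
 * in-core bitmap buffers.
 */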
unsigned long omfs_count_free(struct super_block *sb)
{
	unsigned int i;
	unsigned long sum = 0;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int nbits = sb->s_blocksize * 8;

	for (i = 0; i < sbi->s_imap_size; i++)
		sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

	return sum;
}

/*
 *  Counts the run of zero bits starting at bit up to max.
 *  It handles the case where a run might spill over a buffer.
 *  Called with the bitmap lock held.
 */
static int count_run(unsigned long **addr, int nbits,
		int addrlen, int bit, int max)
{
	int count = 0;
	int x;

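	/*
	 * find_next_bit() returns nbits when no set bit is found, in which
	 * case the run may continue into the next bitmap buffer at bit 0.
	 */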
	for (; addrlen > 0; addrlen--, addr++) {
		x = find_next_bit(*addr, nbits, bit);
		count += x - bit;

		if (x < nbits || count > max)
			return min(count, max);

		bit = 0;
	}
	return min(count, max);
}

/*
 * Sets or clears a run of count bits starting at bit, updating both the
 * in-core bitmap and the on-disk bitmap blocks.
 * Called with the bitmap lock held.
 */
static int set_run(struct super_block *sb, int map,
		int nbits, int bit, int count, int set)
{
	int i;
	int err;
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);

	err = -ENOMEM;
	bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
	if (!bh)
		goto out;

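	/*
	 * Walk the run bit by bit; when it crosses into the next bitmap
	 * block, mark the current buffer dirty and read in the next one.
	 */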
	for (i = 0; i < count; i++, bit++) {
		if (bit >= nbits) {
			bit = 0;
			map++;

			mark_buffer_dirty(bh);
			brelse(bh);
			bh = sb_bread(sb,
				clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
			if (!bh)
				goto out;
		}
		if (set) {
			set_bit(bit, sbi->s_imap[map]);
			set_bit(bit, (unsigned long *)bh->b_data);
		} else {
			clear_bit(bit, sbi->s_imap[map]);
			clear_bit(bit, (unsigned long *)bh->b_data);
		}
	}
	mark_buffer_dirty(bh);
	brelse(bh);
	err = 0;
out:
	return err;
}

/*
 * Tries to allocate exactly one block.  Returns true if successful.
 */
int omfs_allocate_block(struct super_block *sb, u64 block)
{
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	unsigned int map, bit;
	int ret = 0;
	u64 tmp;

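	/* split the block number into a bitmap buffer index and a bit offset */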
	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	mutex_lock(&sbi->s_bitmap_lock);
	if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
		goto out;

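	/* mirror the in-core change in the on-disk bitmap block */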
	if (sbi->s_bitmap_ino > 0) {
		bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
		if (!bh)
			goto out;

		set_bit(bit, (unsigned long *)bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	ret = 1;
out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

/*
 *  Tries to allocate a set of blocks.  The request size depends on the
 *  type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
 *  blocks, we try to allocate sbi->s_clustersize, but can always get away
 *  with just one block.
 */
int omfs_allocate_range(struct super_block *sb,
			int min_request,
			int max_request,
			u64 *return_block,
			int *return_size)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	int ret = 0;
	int i, run, bit;

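	/*
	 * Scan each bitmap buffer for a run of at least min_request free
	 * (zero) bits; count_run() follows a run across buffer boundaries.
	 */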
	mutex_lock(&sbi->s_bitmap_lock);
	for (i = 0; i < sbi->s_imap_size; i++) {
		bit = 0;
		while (bit < bits_per_entry) {
			bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry,
				bit);

			if (bit == bits_per_entry)
				break;

			run = count_run(&sbi->s_imap[i], bits_per_entry,
				sbi->s_imap_size - i, bit, max_request);

			if (run >= min_request)
				goto found;
			bit += run;
		}
	}
	ret = -ENOSPC;
	goto out;

found:
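	/* translate (bitmap buffer index, bit offset) back into a block number */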
	*return_block = i * bits_per_entry + bit;
	*return_size = run;
	ret = set_run(sb, i, bits_per_entry, bit, run, 1);

out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

/*
 * Clears count bits starting at a given block.
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	u64 tmp;
	unsigned int map, bit;
	int ret;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

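	/* nothing to do if the range starts past the end of the bitmap */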
	if (map >= sbi->s_imap_size)
		return 0;

	mutex_lock(&sbi->s_bitmap_lock);
	ret = set_run(sb, map, bits_per_entry, bit, count, 0);
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}