/* Generic part */

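/*
 * This file is meant to be #included by the version-specific
 * itree_v1.c/itree_v2.c, which define block_t, DEPTH, DIRECT,
 * block_to_cpu()/cpu_to_block(), i_data() and block_to_path()
 * for their on-disk layout before pulling in this common code.
 */

/*
 * One link in a chain of indirect blocks: p points at the block
 * number within its parent (the inode's i_data or an indirect
 * block), key caches the value it held when it was read, and bh
 * pins the buffer that p points into (NULL for slots in the
 * inode itself).
 */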
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

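/*
 * Record one link of the chain: where the pointer lives and the
 * value it held when we looked.
 */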
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

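/*
 * Check that every cached key still matches the pointer it was
 * read from; a mismatch means truncate changed the tree under us.
 */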
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

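/* First address past the pointers stored in an indirect block. */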
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char *)bh->b_data + BLOCK_SIZE);
}

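/*
 * Walk the chain of indirect blocks for a file block, reading
 * indirect blocks as needed.  Returns NULL if the whole depth
 * resolved, otherwise the last valid link, with *err set to 0
 * (branch simply ends early), -EIO (read failure) or -EAGAIN
 * (chain changed while we were reading it).
 */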
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

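/*
 * Allocate num blocks and chain them into a detached branch: each
 * new indirect block is zeroed and made to point at the block
 * allocated after it.  On failure, free everything allocated so
 * far and return -ENOSPC.
 */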
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, BLOCK_SIZE);
		branch[n].bh = bh;
		branch[n].p = (block_t *)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		mark_buffer_uptodate(bh, 1);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}

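/*
 * Splice a freshly allocated branch onto the tree: re-verify that
 * the splice point is still reachable and still empty, then store
 * the new block number through it and mark the owners dirty.  On
 * failure the branch is freed and -EAGAIN tells the caller to
 * retry the lookup.
 */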
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */

	/* Writer: pointers */
	if (!verify_chain(chain, where-1) || *where->p)
		/* Writer: end */
		goto changed;

	/* That's it */

	*where->p = where->key;

	/* Writer: end */

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

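/*
 * Map a file block to a device block, allocating the missing part
 * of the branch when create is set.  Runs under the big kernel
 * lock; -EAGAIN from get_branch()/splice_branch() means truncate
 * raced with us, so we drop what we read and start over.
 */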
static inline int get_block(struct inode *inode, long block,
			struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

	lock_kernel();
reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		bh_result->b_dev = inode->i_dev;
		bh_result->b_blocknr = block_to_cpu(chain[depth-1].key);
		bh_result->b_state |= (1UL << BH_Mapped);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		unlock_kernel();
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	bh_result->b_state |= (1UL << BH_New);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

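/* True if no slot in [p, q) holds a block number. */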
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

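/*
 * Find where truncation splits the tree: walk back past indirect
 * blocks that become entirely empty, detach the top of the subtree
 * that will be fully freed (its block number goes to *top), and
 * return the boundary link; the caller must release the buffers
 * still held on the returned chain.
 */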
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; p > chain && all_zeroes((block_t *)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

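/* Free a range of data blocks and clear their slots. */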
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

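/*
 * Recursively free a range of branches: at depth > 0 each slot
 * names an indirect block whose subtree is freed first; at depth 0
 * the slots are plain data blocks.
 */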
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t *)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

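/*
 * Cut the tree back to i_size: zero the tail of the last block,
 * free the tail of the direct blocks or of the partially used
 * branch found by find_shared(), then kill the wholly unused
 * indirect subtrees.
 */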
static inline void truncate(struct inode *inode)
{
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	/* BLOCK_SIZE is 1024, so the shift converts bytes to blocks */
	iblock = (inode->i_size + BLOCK_SIZE-1) >> 10;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
}