/*
 * linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/locks.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/quotaops.h>
#include <linux/module.h>

/*
 * SEARCH_FROM_ZERO forces each block allocation to search from the start
 * of the filesystem. This is to force rapid reallocation of recently-freed
 * blocks. The file fragmentation is horrendous.
 */
#undef SEARCH_FROM_ZERO

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT3_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}
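
/*
 * A fast symlink keeps its target in the inode's i_data array rather than
 * in a data block, so apart from a possible external attribute block it
 * owns no blocks at all.  i_blocks counts 512-byte sectors, which is why
 * the attribute block accounts for (blocksize >> 9) of them: on a
 * 4KB-block filesystem, for instance, a set i_file_acl makes ea_blocks 8,
 * and an inode with i_blocks == 8 is still a fast symlink.
 */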

/* The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */

static int ext3_forget(handle_t *handle, int is_metadata,
		       struct inode *inode, struct buffer_head *bh,
		       int blocknr)
{
	int err;

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */

static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext3 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS + needed;
}
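
/*
 * The shift above converts i_blocks (512-byte sectors) into filesystem
 * blocks.  As an illustration, on a 4KB-block filesystem
 * (s_blocksize_bits == 12) an inode with i_blocks == 80 covers
 * 80 >> 3 == 10 blocks, so a truncate chunk reserves
 * EXT3_DATA_TRANS_BLOCKS + 10 journal credits, clamped to the
 * EXT3_MAX_TRANS_DATA ceiling for huge files.
 */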

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */

static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext3_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
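
/*
 * The intended calling pattern (used by ext3_clear_blocks() below) is:
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		... dirty everything modified so far against this handle ...
 *		ext3_journal_test_restart(handle, inode);
 *		... re-acquire any write access the restart dropped ...
 *	}
 *
 * i.e. a return of 1 means "commit what you have and start over".
 */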

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext3_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at each iput()
 */
void ext3_put_inode (struct inode * inode)
{
	ext3_discard_prealloc (inode);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	if (is_bad_inode(inode) ||
	    inode->i_ino == EXT3_ACL_IDX_INO ||
	    inode->i_ino == EXT3_ACL_DATA_INO)
		goto no_delete;

	lock_kernel();
	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/* If we're going to skip the normal cleanup, we still
		 * need to make sure that the in-core orphan linked list
		 * is properly cleaned up. */
		ext3_orphan_del(NULL, inode);

		ext3_std_error(inode->i_sb, PTR_ERR(handle));
		unlock_kernel();
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);
	/*
	 * Kill off the orphan record which ext3_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext3_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext3_orphan_del(handle, inode);
	inode->u.ext3_i.i_dtime = CURRENT_TIME;

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle, inode);
	unlock_kernel();
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

void ext3_discard_prealloc (struct inode * inode)
{
#ifdef EXT3_PREALLOCATE
	lock_kernel();
	/* Writer: ->i_prealloc* */
	if (inode->u.ext3_i.i_prealloc_count) {
		unsigned short total = inode->u.ext3_i.i_prealloc_count;
		unsigned long block = inode->u.ext3_i.i_prealloc_block;
		inode->u.ext3_i.i_prealloc_count = 0;
		inode->u.ext3_i.i_prealloc_block = 0;
		/* Writer: end */
		ext3_free_blocks (inode, block, total);
	}
	unlock_kernel();
#endif
}

static int ext3_alloc_block (handle_t *handle,
			struct inode * inode, unsigned long goal, int *err)
{
#ifdef EXT3FS_DEBUG
	static unsigned long alloc_hits = 0, alloc_attempts = 0;
#endif
	unsigned long result;

#ifdef EXT3_PREALLOCATE
	/* Writer: ->i_prealloc* */
	if (inode->u.ext3_i.i_prealloc_count &&
	    (goal == inode->u.ext3_i.i_prealloc_block ||
	     goal + 1 == inode->u.ext3_i.i_prealloc_block))
	{
		result = inode->u.ext3_i.i_prealloc_block++;
		inode->u.ext3_i.i_prealloc_count--;
		/* Writer: end */
		ext3_debug ("preallocation hit (%lu/%lu).\n",
			    ++alloc_hits, ++alloc_attempts);
	} else {
		ext3_discard_prealloc (inode);
		ext3_debug ("preallocation miss (%lu/%lu).\n",
			    alloc_hits, ++alloc_attempts);
		if (S_ISREG(inode->i_mode))
			result = ext3_new_block (inode, goal,
				 &inode->u.ext3_i.i_prealloc_count,
				 &inode->u.ext3_i.i_prealloc_block, err);
		else
			result = ext3_new_block (inode, goal, 0, 0, err);
		/*
		 * AKPM: this is somewhat sticky.  I'm not surprised it was
		 * disabled in 2.2's ext3.  Need to integrate b_committed_data
		 * guarding with preallocation, if indeed preallocation is
		 * effective.
		 */
	}
#else
	result = ext3_new_block (handle, inode, goal, 0, 0, err);
#endif
	return result;
}


typedef struct {
	u32	*p;
	u32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
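
/*
 * verify_chain() re-reads each saved pointer through ->p and compares it
 * with the ->key that was cached when the chain was built: if a truncate
 * changed any pointer in the meantime, the walk stops early and the
 * function returns 0, telling the caller to re-read the chain.
 */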

/**
 * ext3_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 *
 * To store the locations of file's data ext3 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @i_block is out of range
 * (negative or too large) a warning is printed and zero is returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4])
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
	}
	return n;
}
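
/*
 * Illustration, assuming 4KB blocks (ptrs == 1024, ptrs_bits == 10) and
 * EXT3_NDIR_BLOCKS == 12:
 *
 *	i_block = 5    -> n = 1, offsets = {5}
 *	i_block = 12   -> n = 2, offsets = {EXT3_IND_BLOCK, 0}
 *	i_block = 1036 -> n = 3, offsets = {EXT3_DIND_BLOCK, 0, 0}
 *
 * i.e. logical block 1036 is reached via slot 0 of the double-indirect
 * block, then slot 0 of that single-indirect block.
 */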

/**
 * ext3_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it notices that chain had been changed while it was reading
 * (ditto, *@err == -EAGAIN)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, inode->u.ext3_i.i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
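
/*
 * As a concrete illustration: for a depth-2 path {EXT3_IND_BLOCK, 5},
 * a successful walk leaves
 *
 *	chain[0] = { .p = &i_data[EXT3_IND_BLOCK], .key = <ind block #>,
 *		     .bh = NULL }
 *	chain[1] = { .p = &((u32 *)bh->b_data)[5], .key = <data block #>,
 *		     .bh = <bh of the indirect block> }
 *
 * and returns NULL; a hole at either level instead returns a pointer to
 * the triple whose .key is zero.
 */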

/**
 * ext3_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 * + if there is a block to the left of our position - allocate near it.
 * + if pointer will live in indirect block - allocate near that block.
 * + if pointer will live in inode - allocate in the same
 *   cylinder group.
 * Caller must make sure that @ind is valid and will stay that way.
 */

static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
	u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.ext3_i.i_data;
	u32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just
	 * put it into the same cylinder group then.
	 */
	return (inode->u.ext3_i.i_block_group *
		EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
	       le32_to_cpu(inode->i_sb->u.ext3_sb.s_es->s_first_data_block);
}

/**
 * ext3_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @chain: chain of indirect blocks
 * @partial: pointer to the last triple within a chain
 * @goal: place to store the result.
 *
 * Normally this function finds the preferred place for block allocation,
 * stores it in *@goal and returns zero. If the branch had been changed
 * under us we return -EAGAIN.
 */

static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
			  Indirect *partial, unsigned long *goal)
{
	/* Writer: ->i_next_alloc* */
	if (block == inode->u.ext3_i.i_next_alloc_block + 1) {
		inode->u.ext3_i.i_next_alloc_block++;
		inode->u.ext3_i.i_next_alloc_goal++;
	}
#ifdef SEARCH_FROM_ZERO
	inode->u.ext3_i.i_next_alloc_block = 0;
	inode->u.ext3_i.i_next_alloc_goal = 0;
#endif
	/* Writer: end */
	/* Reader: pointers, ->i_next_alloc* */
	if (verify_chain(chain, partial)) {
		/*
		 * try the heuristic for sequential allocation,
		 * failing that at least try to get decent locality.
		 */
		if (block == inode->u.ext3_i.i_next_alloc_block)
			*goal = inode->u.ext3_i.i_next_alloc_goal;
		if (!*goal)
			*goal = ext3_find_near(inode, partial);
#ifdef SEARCH_FROM_ZERO
		*goal = 0;
#endif
		return 0;
	}
	/* Reader: end */
	return -EAGAIN;
}
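
/*
 * Putting the two helpers together: a strictly sequential write sees
 * block == i_next_alloc_block, so the goal is the block right after the
 * previous allocation; any other pattern falls back to ext3_find_near(),
 * which prefers (in order) the nearest non-zero pointer to the left,
 * the indirect block itself, and finally the start of the inode's
 * cylinder group.
 */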

/**
 * ext3_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @num: depth of the chain (number of blocks to allocate)
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates @num blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext3_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext3_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */

static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			     int num,
			     unsigned long goal,
			     int *offsets,
			     Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0, keys = 0;
	int err = 0;
	int i;
	int parent = ext3_alloc_block(handle, inode, goal, &err);

	branch[0].key = cpu_to_le32(parent);
	if (parent) {
		keys = 1;
		for (n = 1; n < num; n++) {
			struct buffer_head *bh;
			/* Allocate the next block */
			int nr = ext3_alloc_block(handle, inode, parent, &err);
			if (!nr)
				break;
			branch[n].key = cpu_to_le32(nr);
			keys = n+1;

			/*
			 * Get buffer_head for parent block, zero it out
			 * and set the pointer to new one, then send
			 * parent to disk.
			 */
			bh = sb_getblk(inode->i_sb, parent);
			branch[n].bh = bh;
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			err = ext3_journal_get_create_access(handle, bh);
			if (err) {
				unlock_buffer(bh);
				brelse(bh);
				break;
			}

			memset(bh->b_data, 0, blocksize);
			branch[n].p = (u32*) bh->b_data + offsets[n];
			*branch[n].p = branch[n].key;
			BUFFER_TRACE(bh, "marking uptodate");
			mark_buffer_uptodate(bh, 1);
			unlock_buffer(bh);

			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (err)
				break;

			parent = nr;
		}
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < keys; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < keys; i++)
		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
	return err;
}

/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext3_alloc_branch)
 * @where: location of missing link
 * @num: number of blocks we are adding
 *
 * This function verifies that chain (up to the missing link) had not
 * changed, fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0. Otherwise (== chain had been changed)
 * we free the new blocks (forgetting their buffer_heads, indeed) and
 * return -EAGAIN.
 */

static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
			      Indirect chain[4], Indirect *where, int num)
{
	int i;
	int err = 0;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* Verify that place we are splicing to is still there and vacant */

	/* Writer: pointers, ->i_next_alloc* */
	if (!verify_chain(chain, where-1) || *where->p)
		/* Writer: end */
		goto changed;

	/* That's it */

	*where->p = where->key;
	inode->u.ext3_i.i_next_alloc_block = block;
	inode->u.ext3_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
#ifdef SEARCH_FROM_ZERO
	inode->u.ext3_i.i_next_alloc_block = 0;
	inode->u.ext3_i.i_next_alloc_goal = 0;
#endif
	/* Writer: end */

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * akpm: If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

changed:
	/*
	 * AKPM: if where[i].bh isn't part of the current updating
	 * transaction then we explode nastily. Test this code path.
	 */
	jbd_debug(1, "the chain changed: try again\n");
	err = -EAGAIN;

err_out:
	for (i = 1; i < num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
	}
	/* For the normal collision cleanup case, we free up the blocks.
	 * On genuine filesystem errors we don't even think about doing
	 * that. */
	if (err == -EAGAIN)
		for (i = 0; i < num; i++)
			ext3_free_blocks(handle, inode,
					 le32_to_cpu(where[i].key), 1);
	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int ext3_get_block_handle(handle_t *handle, struct inode *inode,
				 long iblock,
				 struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int depth = ext3_block_to_path(inode, iblock, offsets);
	loff_t new_size;

	J_ASSERT(handle != NULL || create == 0);

	if (depth == 0)
		goto out;

	lock_kernel();
reread:
	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		bh_result->b_state &= ~(1UL << BH_New);
got_it:
		bh_result->b_dev = inode->i_dev;
		bh_result->b_blocknr = le32_to_cpu(chain[depth-1].key);
		bh_result->b_state |= (1UL << BH_Mapped);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		BUFFER_TRACE(bh_result, "returned");
		unlock_kernel();
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	goal = 0;
	if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0)
		goto changed;

	left = (chain + depth) - partial;

	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	down_read(&inode->u.ext3_i.truncate_sem);
	err = ext3_alloc_branch(handle, inode, left, goal,
				offsets+(partial-chain), partial);

	/* The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock, chain,
					 partial, left);
	up_read(&inode->u.ext3_i.truncate_sem);
	if (err == -EAGAIN)
		goto changed;
	if (err)
		goto cleanup;

	new_size = inode->i_size;
	/*
	 * This is not racy against ext3_truncate's modification of i_disksize
	 * because VM/VFS ensures that the file cannot be extended while
	 * truncate is in progress.  It is racy between multiple parallel
	 * instances of get_block, but we have the BKL.
	 */
	if (new_size > inode->u.ext3_i.i_disksize)
		inode->u.ext3_i.i_disksize = new_size;

	bh_result->b_state |= (1UL << BH_New);
	goto got_it;

changed:
	while (partial > chain) {
		jbd_debug(1, "buffer chain changed, retrying\n");
		BUFFER_TRACE(partial->bh, "brelsing");
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

/*
 * The BKL is not held on entry here.
 */
static int ext3_get_block(struct inode *inode, long iblock,
			  struct buffer_head *bh_result, int create)
{
	handle_t *handle = 0;
	int ret;

	if (create) {
		handle = ext3_journal_current_handle();
		J_ASSERT(handle != 0);
	}
	ret = ext3_get_block_handle(handle, inode, iblock, bh_result, create);
	return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
				long block, int create, int * errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
	if (!*errp && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != 0);

			/* Now that we do not always journal data, we
			   should keep in mind whether this should
			   always journal the new buffer as metadata.
			   For now, regular file writes use
			   ext3_get_block instead, so it's not a
			   problem. */
			lock_kernel();
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal) {
				memset(bh->b_data, 0,
				       inode->i_sb->s_blocksize);
				mark_buffer_uptodate(bh, 1);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal) fatal = err;
			unlock_kernel();
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
	return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;
	int prev_blocks;

	prev_blocks = inode->i_blocks;

	bh = ext3_getblk (handle, inode, block, create, err);
	if (!bh)
		return bh;
#ifdef EXT3_PREALLOCATE
	/*
	 * If the inode has grown, and this is a directory, then use a few
	 * more of the preallocated blocks to keep directory fragmentation
	 * down.  The preallocated blocks are guaranteed to be contiguous.
	 */
	if (create &&
	    S_ISDIR(inode->i_mode) &&
	    inode->i_blocks > prev_blocks &&
	    EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
		int i;
		struct buffer_head *tmp_bh;

		for (i = 1;
		     inode->u.ext3_i.i_prealloc_count &&
		     i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
		     i++) {
			/*
			 * ext3_getblk will zero out the contents of the
			 * directory for us
			 */
			tmp_bh = ext3_getblk(handle, inode,
					     block+i, create, err);
			if (!tmp_bh) {
				brelse (bh);
				return 0;
			}
			brelse (tmp_bh);
		}
	}
#endif
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct inode *inode,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct inode *inode,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
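
/*
 * walk_page_buffers() is the workhorse for the per-buffer journaling
 * hooks below: for example, ext3_prepare_write() runs
 *
 *	walk_page_buffers(handle, inode, page->buffers, from, to,
 *			  NULL, do_journal_get_write_access);
 *
 * to get journal write access on every buffer the write will touch,
 * and ext3_commit_write() repeats the walk with commit_write_fn (and a
 * "partial" flag so it knows whether the whole page became uptodate).
 */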

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */

static int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				       struct buffer_head *bh)
{
	return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;

	lock_kernel();
	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	unlock_kernel();
	ret = block_prepare_write(page, from, to, ext3_get_block);
	lock_kernel();
	if (ret != 0)
		goto prepare_write_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, inode, page->buffers,
				from, to, NULL, do_journal_get_write_access);
		if (ret) {
			/*
			 * We're going to fail this prepare_write(),
			 * so commit_write() will not be called.
			 * We need to undo block_prepare_write()'s kmap().
			 * AKPM: Do we need to clear PageUptodate? I don't
			 * think so.
			 */
			kunmap(page);
		}
	}
prepare_write_failed:
	if (ret)
		ext3_journal_stop(handle, inode);
out:
	unlock_kernel();
	return ret;
}

static int journal_dirty_sync_data(handle_t *handle, struct inode *inode,
				   struct buffer_head *bh)
{
	int ret = ext3_journal_dirty_data(handle, bh, 0);
	buffer_insert_inode_data_queue(bh, inode);
	return ret;
}

/*
 * For ext3_writepage().  We also brelse() the buffer to account for
 * the bget() which ext3_writepage() performs.
 */
static int journal_dirty_async_data(handle_t *handle, struct inode *inode,
				    struct buffer_head *bh)
{
	int ret = ext3_journal_dirty_data(handle, bh, 1);
	buffer_insert_inode_data_queue(bh, inode);
	__brelse(bh);
	return ret;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct inode *inode,
			   struct buffer_head *bh)
{
	set_bit(BH_Uptodate, &bh->b_state);
	return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from block_symlink().
 *
 * ext3 inode->i_dirty_buffers policy:  If we're journalling data we
 * definitely don't want them to appear on the inode at all - instead
 * we need to manage them at the JBD layer and we need to intercept
 * the relevant sync operations and translate them into journal operations.
 *
 * If we're not journalling data then we can just leave the buffers
 * on ->i_dirty_buffers.  If someone writes them out for us then thanks.
 * Otherwise we'll do it in commit, if we're using ordered data.
 */

static int ext3_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;

	lock_kernel();
	if (ext3_should_journal_data(inode)) {
		/*
		 * Here we duplicate the generic_commit_write() functionality
		 */
		int partial = 0;
		loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

		ret = walk_page_buffers(handle, inode, page->buffers,
			from, to, &partial, commit_write_fn);
		if (!partial)
			SetPageUptodate(page);
		kunmap(page);
		if (pos > inode->i_size)
			inode->i_size = pos;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	} else {
		if (ext3_should_order_data(inode)) {
			ret = walk_page_buffers(handle, inode, page->buffers,
				from, to, NULL, journal_dirty_sync_data);
		}
		/* Be careful here if generic_commit_write becomes a
		 * required invocation after block_prepare_write. */
		if (ret == 0) {
			ret = generic_commit_write(file, page, from, to);
		} else {
			/*
			 * block_prepare_write() was called, but we're not
			 * going to call generic_commit_write().  So we
			 * need to perform generic_commit_write()'s kunmap
			 * by hand.
			 */
			kunmap(page);
		}
	}
	if (inode->i_size > inode->u.ext3_i.i_disksize) {
		inode->u.ext3_i.i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle, inode);
	unlock_kernel();
	if (!ret)
		ret = ret2;
	return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static int ext3_bmap(struct address_space *mapping, long block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct inode *inode,
		    struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
	return 0;
}

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext3_writepage()
 *
 * Similar for:
 *
 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero.  We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *     In journalled data mode, a data buffer may be metadata against the
 *     current transaction.  But the same file is part of a shared mapping
 *     and someone does a writepage() on it.
 *
 *     We will move the buffer onto the async_data list, but *after* it has
 *     been dirtied.  So there's a small window where we have dirty data on
 *     BJ_Metadata.
 *
 *     Note that this only applies to the last partial page in the file.  The
 *     bit which block_write_full_page() uses prepare/commit for.  (That's
 *     broken code anyway: it's wrong for msync()).
 *
 *     It's a rare case: affects the final partial page, for journalled data
 *     where the file is subject to both write() and writepage() in the same
 *     transaction.  To fix it we'll need a custom block_write_full_page().
 *     We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 */
static int ext3_writepage(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_buffers;
	handle_t *handle = NULL;
	int ret = 0, err;
	int needed;
	int order_data;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be
	 * for a different filesystem.  One *could* look for a
	 * nested transaction opportunity.
	 */
	lock_kernel();
	if (ext3_journal_current_handle())
		goto out_fail;

	needed = ext3_writepage_trans_blocks(inode);
	if (current->flags & PF_MEMALLOC)
		handle = ext3_journal_try_start(inode, needed);
	else
		handle = ext3_journal_start(inode, needed);

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	order_data = ext3_should_order_data(inode) ||
		ext3_should_journal_data(inode);

	unlock_kernel();

	page_buffers = NULL;	/* Purely to prevent compiler warning */

	/* bget() all the buffers */
	if (order_data) {
		if (!page->buffers)
			create_empty_buffers(page,
				inode->i_dev, inode->i_sb->s_blocksize);
		page_buffers = page->buffers;
		walk_page_buffers(handle, inode, page_buffers, 0,
				PAGE_CACHE_SIZE, NULL, bget_one);
	}

	ret = block_write_full_page(page, ext3_get_block);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_buffers is
	 * safe due to elevated refcount.
	 */

	handle = ext3_journal_current_handle();
	lock_kernel();

	/* And attach them to the current transaction */
	if (order_data) {
		err = walk_page_buffers(handle, inode, page_buffers,
			0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
		if (!ret)
			ret = err;
	}

	err = ext3_journal_stop(handle, inode);
	if (!ret)
		ret = err;
	unlock_kernel();
	return ret;

out_fail:

	unlock_kernel();
	SetPageDirty(page);
	UnlockPage(page);
	return ret;
}

static int ext3_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ext3_get_block);
}


static int ext3_flushpage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_flushpage(journal, page, offset);
}

static int ext3_releasepage(struct page *page, int wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_try_to_free_buffers(journal, page, wait);
}


struct address_space_operations ext3_aops = {
	readpage:	ext3_readpage,		/* BKL not held.  Don't need */
	writepage:	ext3_writepage,		/* BKL not held.  We take it */
	sync_page:	block_sync_page,
	prepare_write:	ext3_prepare_write,	/* BKL not held.  We take it */
	commit_write:	ext3_commit_write,	/* BKL not held.  We take it */
	bmap:		ext3_bmap,		/* BKL held */
	flushpage:	ext3_flushpage,		/* BKL not held.  Don't need */
	releasepage:	ext3_releasepage,	/* BKL not held.  Don't need */
};

/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
static int ext3_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = inode->i_sb->s_blocksize;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	page = find_or_create_page(mapping, index, GFP_NOFS);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);

	/* Find the buffer that contains "offset" */
	bh = page->buffers;
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		/* Hole? Nothing to do */
		if (buffer_uptodate(bh))
			goto unlock;
		ext3_get_block(inode, iblock, bh, 0);
		/* Still unmapped? Nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (Page_Uptodate(page))
		set_bit(BH_Uptodate, &bh->b_state);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	memset(kmap(page) + offset, 0, length);
	flush_dcache_page(page);
	kunmap(page);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh, 0);
		__mark_buffer_dirty(bh);
	}

unlock:
	UnlockPage(page);
	page_cache_release(page);
out:
	return err;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
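
/*
 * all_zeroes(p, q) scans the half-open word range [p, q): for instance,
 * ext3_find_shared() below calls it as all_zeroes((u32 *)bh->b_data, p->p)
 * to ask "is every pointer to the left of the truncation point in this
 * indirect block already zero?".
 */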

/**
 * ext3_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the (detached) top of branch
 *
 * This is a helper function used by ext3_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred to
 * from it (and it is on the path to the first completely truncated
 * data block, indeed). We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext3_truncate()
 * finishes, we may safely do the latter, but the top of the branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of the branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * The actual freeing of the subtrees is left to the caller:
 * a) free the subtree starting from *@top
 * b) free the subtrees whose roots are stored in
 *	(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 * c) free the subtrees growing from the inode past the @chain[0].
 *	(no partially truncated stuff there).
 */

static Indirect *ext3_find_shared(struct inode *inode,
				  int depth,
				  int offsets[4],
				  Indirect chain[4],
				  u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; p > chain && all_zeroes((u32*)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void
ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
		  unsigned long block_to_free, unsigned long count,
		  u32 *first, u32 *last)
{
	u32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We find
	 * them on the hash table so journal_revoke() will run journal_forget()
	 * on them.  We've already detached each block from the file, so
	 * bforget() in journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_get_hash_table(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}

/**
 * ext3_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
                           struct buffer_head *this_bh, u32 *first, u32 *last)
{
        unsigned long block_to_free = 0;    /* Starting block # of a run */
        unsigned long count = 0;            /* Number of blocks in the run */
        u32 *block_to_free_p = NULL;        /* Pointer into inode/ind
                                               corresponding to
                                               block_to_free */
        unsigned long nr;                   /* Current block # */
        u32 *p;                             /* Pointer into inode/ind
                                               for current block */
        int err;

        if (this_bh) {                          /* For indirect block */
                BUFFER_TRACE(this_bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, this_bh);
                /* Important: if we can't update the indirect pointers
                 * to the blocks, we can't free them. */
                if (err)
                        return;
        }

        for (p = first; p < last; p++) {
                nr = le32_to_cpu(*p);
                if (nr) {
                        /* accumulate blocks to free if they're contiguous */
                        if (count == 0) {
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        } else if (nr == block_to_free + count) {
                                count++;
                        } else {
                                ext3_clear_blocks(handle, inode, this_bh,
                                                  block_to_free,
                                                  count, block_to_free_p, p);
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        }
                }
        }

        if (count > 0)
                ext3_clear_blocks(handle, inode, this_bh, block_to_free,
                                  count, block_to_free_p, p);

        if (this_bh) {
                BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
                ext3_journal_dirty_metadata(handle, this_bh);
        }
}
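
/*
 * Illustrative example (not part of the original source): if the pointer
 * array holds block numbers {100, 101, 102, 0, 200}, the loop above makes
 * one call ext3_clear_blocks(..., 100, 3, ...) for the contiguous run when
 * it reaches 200, and the flush after the loop makes a second call
 * ext3_clear_blocks(..., 200, 1, ...) for the lone block.  The hole (the
 * zero entry) is simply skipped, which is why (last - first) can exceed
 * `count'.
 */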

/**
 * ext3_free_branches - free an array of branches
 * @handle: JBD handle for this transaction
 * @inode: inode we are dealing with
 * @parent_bh: the buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
                               struct buffer_head *parent_bh,
                               u32 *first, u32 *last, int depth)
{
        unsigned long nr;
        u32 *p;

        if (is_handle_aborted(handle))
                return;

        if (depth--) {
                struct buffer_head *bh;
                int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
                p = last;
                while (--p >= first) {
                        nr = le32_to_cpu(*p);
                        if (!nr)
                                continue;               /* A hole */

                        /* Go read the buffer for the next level down */
                        bh = sb_bread(inode->i_sb, nr);

                        /*
                         * A read failure? Report error and clear slot
                         * (should be rare).
                         */
                        if (!bh) {
                                ext3_error(inode->i_sb, "ext3_free_branches",
                                           "Read failure, inode=%ld, block=%ld",
                                           inode->i_ino, nr);
                                continue;
                        }

                        /* This zaps the entire block.  Bottom up. */
                        BUFFER_TRACE(bh, "free child branches");
                        ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
                                           (u32*)bh->b_data + addr_per_block,
                                           depth);

                        /*
                         * We've probably journalled the indirect block several
                         * times during the truncate.  But it's no longer
                         * needed and we now drop it from the transaction via
                         * journal_revoke().
                         *
                         * That's easy if it's exclusively part of this
                         * transaction.  But if it's part of the committing
                         * transaction then journal_forget() will simply
                         * brelse() it.  That means that if the underlying
                         * block is reallocated in ext3_get_block(),
                         * unmap_underlying_metadata() will find this block
                         * and will try to get rid of it.  damn, damn.
                         *
                         * If this block has already been committed to the
                         * journal, a revoke record will be written.  And
                         * revoke records must be emitted *before* clearing
                         * this block's bit in the bitmaps.
                         */
                        ext3_forget(handle, 1, inode, bh, bh->b_blocknr);

                        /*
                         * Everything below this pointer has been
                         * released.  Now let this top-of-subtree go.
                         *
                         * We want the freeing of this indirect block to be
                         * atomic in the journal with the updating of the
                         * bitmap block which owns it.  So make some room in
                         * the journal.
                         *
                         * We zero the parent pointer *after* freeing its
                         * pointee in the bitmaps, so if extend_transaction()
                         * for some reason fails to put the bitmap changes and
                         * the release into the same transaction, recovery
                         * will merely complain about releasing a free block,
                         * rather than leaking blocks.
                         */
                        if (is_handle_aborted(handle))
                                return;
                        if (try_to_extend_transaction(handle, inode)) {
                                ext3_mark_inode_dirty(handle, inode);
                                ext3_journal_test_restart(handle, inode);
                        }

                        ext3_free_blocks(handle, inode, nr, 1);

                        if (parent_bh) {
                                /*
                                 * The block which we have just freed is
                                 * pointed to by an indirect block: journal it
                                 */
                                BUFFER_TRACE(parent_bh, "get_write_access");
                                if (!ext3_journal_get_write_access(handle,
                                                                   parent_bh)){
                                        *p = 0;
                                        BUFFER_TRACE(parent_bh,
                                             "call ext3_journal_dirty_metadata");
                                        ext3_journal_dirty_metadata(handle,
                                                                    parent_bh);
                                }
                        }
                }
        } else {
                /* We have reached the bottom of the tree. */
                BUFFER_TRACE(parent_bh, "free data blocks");
                ext3_free_data(handle, inode, parent_bh, first, last);
        }
}
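
/*
 * Illustrative example (not part of the original source): freeing an
 * entire double-indirect subtree is ext3_free_branches(..., &nr, &nr+1, 2).
 * With depth == 2 we read the double-indirect block and recurse with
 * depth == 1 on each of its pointers; with depth == 1 we read each
 * indirect block and recurse with depth == 0, which takes the else branch
 * and hands the pointer array to ext3_free_data().  Each level frees its
 * own block only after the whole subtree below it is gone - "bottom up,
 * right to left".
 */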

/*
 * ext3_truncate()
 *
 * We block out ext3_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext3_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext3 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext3_truncate() run will find them and release them.
 */

void ext3_truncate(struct inode *inode)
{
        handle_t *handle;
        u32 *i_data = inode->u.ext3_i.i_data;
        int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        int nr = 0;
        int n;
        long last_block;
        unsigned blocksize;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
              S_ISLNK(inode->i_mode)))
                return;
        if (ext3_inode_is_fast_symlink(inode))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        ext3_discard_prealloc(inode);

        handle = start_transaction(inode);
        if (IS_ERR(handle))
                return;         /* AKPM: return what? */

        blocksize = inode->i_sb->s_blocksize;
        last_block = (inode->i_size + blocksize-1)
                                        >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

        ext3_block_truncate_page(handle, inode->i_mapping, inode->i_size);

        n = ext3_block_to_path(inode, last_block, offsets);
        if (n == 0)
                goto out_stop;  /* error */

        /*
         * OK.  This truncate is going to happen.  We add the inode to the
         * orphan list, so that if this truncate spans multiple transactions,
         * and we crash, we will resume the truncate when the filesystem
         * recovers.  It also marks the inode dirty, to catch the new size.
         *
         * Implication: the file must always be in a sane, consistent
         * truncatable state while each transaction commits.
         */
        if (ext3_orphan_add(handle, inode))
                goto out_stop;

        /*
         * The orphan list entry will now protect us from any crash which
         * occurs before the truncate completes, so it is now safe to propagate
         * the new, shorter inode size (held for now in i_size) into the
         * on-disk inode.  We do this via i_disksize, which is the value which
         * ext3 *really* writes onto the disk inode.
         */
        inode->u.ext3_i.i_disksize = inode->i_size;

        /*
         * From here we block out all ext3_get_block() callers who want to
         * modify the block allocation tree.
         */
        down_write(&inode->u.ext3_i.truncate_sem);

        if (n == 1) {           /* direct blocks */
                ext3_free_data(handle, inode, NULL, i_data+offsets[0],
                               i_data + EXT3_NDIR_BLOCKS);
                goto do_indirects;
        }

        partial = ext3_find_shared(inode, n, offsets, chain, &nr);
        /* Kill the top of shared branch (not detached) */
        if (nr) {
                if (partial == chain) {
                        /* Shared branch grows from the inode */
                        ext3_free_branches(handle, inode, NULL,
                                           &nr, &nr+1, (chain+n-1) - partial);
                        *partial->p = 0;
                        /*
                         * We mark the inode dirty prior to restart,
                         * and prior to stop.  No need for it here.
                         */
                } else {
                        /* Shared branch grows from an indirect block */
                        BUFFER_TRACE(partial->bh, "get_write_access");
                        ext3_free_branches(handle, inode, partial->bh,
                                           partial->p,
                                           partial->p+1, (chain+n-1) - partial);
                }
        }
        /* Clear the ends of indirect blocks on the shared branch */
        while (partial > chain) {
                ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
                                   (u32*)partial->bh->b_data + addr_per_block,
                                   (chain+n-1) - partial);
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
do_indirects:
        /* Kill the remaining (whole) subtrees */
        switch (offsets[0]) {
                default:
                        nr = i_data[EXT3_IND_BLOCK];
                        if (nr) {
                                ext3_free_branches(handle, inode, NULL,
                                                   &nr, &nr+1, 1);
                                i_data[EXT3_IND_BLOCK] = 0;
                        }
                case EXT3_IND_BLOCK:
                        nr = i_data[EXT3_DIND_BLOCK];
                        if (nr) {
                                ext3_free_branches(handle, inode, NULL,
                                                   &nr, &nr+1, 2);
                                i_data[EXT3_DIND_BLOCK] = 0;
                        }
                case EXT3_DIND_BLOCK:
                        nr = i_data[EXT3_TIND_BLOCK];
                        if (nr) {
                                ext3_free_branches(handle, inode, NULL,
                                                   &nr, &nr+1, 3);
                                i_data[EXT3_TIND_BLOCK] = 0;
                        }
                case EXT3_TIND_BLOCK:
                        ;
        }
        up_write(&inode->u.ext3_i.truncate_sem);
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        ext3_mark_inode_dirty(handle, inode);

        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous */
        if (IS_SYNC(inode))
                handle->h_sync = 1;
out_stop:
        /*
         * If this was a simple ftruncate(), and the file will remain alive
         * then we need to clear up the orphan record which we created above.
         * However, if this was a real unlink then we were called by
         * ext3_delete_inode(), and we allow that function to clean up the
         * orphan info for us.
         */
        if (inode->i_nlink)
                ext3_orphan_del(handle, inode);

        ext3_journal_stop(handle, inode);
}
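
/*
 * Note on the do_indirects switch above (explanatory, not in the original
 * source): the cases deliberately fall through.  offsets[0] identifies the
 * tree that contained the truncation point, and every *whole* tree to its
 * right must die.  For example, if the new EOF lands in the direct blocks
 * (offsets[0] < EXT3_NDIR_BLOCKS, the `default' case), the indirect,
 * double-indirect and triple-indirect trees are all freed in turn; if it
 * lands in the single-indirect tree (case EXT3_IND_BLOCK), only the
 * double- and triple-indirect trees are freed.
 */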

/*
 * ext3_get_inode_loc returns with an extra refcount against the
 * inode's underlying buffer_head on success.
 */

int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
{
        struct buffer_head *bh = 0;
        unsigned long block;
        unsigned long block_group;
        unsigned long group_desc;
        unsigned long desc;
        unsigned long offset;
        struct ext3_group_desc * gdp;

        if ((inode->i_ino != EXT3_ROOT_INO &&
             inode->i_ino != EXT3_ACL_IDX_INO &&
             inode->i_ino != EXT3_ACL_DATA_INO &&
             inode->i_ino != EXT3_JOURNAL_INO &&
             inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
            inode->i_ino > le32_to_cpu(
                        inode->i_sb->u.ext3_sb.s_es->s_inodes_count)) {
                ext3_error(inode->i_sb, "ext3_get_inode_loc",
                           "bad inode number: %lu", inode->i_ino);
                goto bad_inode;
        }
        block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
        if (block_group >= inode->i_sb->u.ext3_sb.s_groups_count) {
                ext3_error(inode->i_sb, "ext3_get_inode_loc",
                           "group >= groups count");
                goto bad_inode;
        }
        group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
        desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
        bh = inode->i_sb->u.ext3_sb.s_group_desc[group_desc];
        if (!bh) {
                ext3_error(inode->i_sb, "ext3_get_inode_loc",
                           "Descriptor not loaded");
                goto bad_inode;
        }

        gdp = (struct ext3_group_desc *) bh->b_data;
        /*
         * Figure out the offset within the block group inode table
         */
        offset = ((inode->i_ino - 1) % EXT3_INODES_PER_GROUP(inode->i_sb)) *
                EXT3_INODE_SIZE(inode->i_sb);
        block = le32_to_cpu(gdp[desc].bg_inode_table) +
                (offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
        if (!(bh = sb_bread(inode->i_sb, block))) {
                ext3_error(inode->i_sb, "ext3_get_inode_loc",
                           "unable to read inode block - "
                           "inode=%lu, block=%lu", inode->i_ino, block);
                goto bad_inode;
        }
        offset &= (EXT3_BLOCK_SIZE(inode->i_sb) - 1);

        iloc->bh = bh;
        iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
        iloc->block_group = block_group;

        return 0;

 bad_inode:
        return -EIO;
}
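
/*
 * Worked example (illustrative figures, not from the original source):
 * for inode 8000 on a filesystem with 1K blocks, 128-byte inodes and
 * 2048 inodes per group: block_group = 7999 / 2048 = 3, and the index
 * within that group is 7999 % 2048 = 1855, so offset = 1855 * 128 =
 * 237440 bytes into the group's inode table.  That is 237440 >> 10 = 231
 * blocks past bg_inode_table, and the inode starts 237440 & 1023 = 896
 * bytes into that block.
 */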

void ext3_set_inode_flags(struct inode *inode)
{
        unsigned int flags = inode->u.ext3_i.i_flags;

        inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME);
        if (flags & EXT3_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & EXT3_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & EXT3_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
        if (flags & EXT3_NOATIME_FL)
                inode->i_flags |= S_NOATIME;
}

void ext3_read_inode(struct inode *inode)
{
        struct ext3_iloc iloc;
        struct ext3_inode *raw_inode;
        struct buffer_head *bh;
        int block;

        if (ext3_get_inode_loc(inode, &iloc))
                goto bad_inode;
        bh = iloc.bh;
        raw_inode = iloc.raw_inode;
        init_rwsem(&inode->u.ext3_i.truncate_sem);
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
        if (!(test_opt(inode->i_sb, NO_UID32))) {
                inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
                inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
        }
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le32_to_cpu(raw_inode->i_size);
        inode->i_atime = le32_to_cpu(raw_inode->i_atime);
        inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
        inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
        inode->u.ext3_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
         * This is needed because nfsd might try to access dead inodes.
         * The test is the same one that e2fsck uses.
         * NeilBrown 1999oct15
         */
        if (inode->i_nlink == 0) {
                if (inode->i_mode == 0 ||
                    !(inode->i_sb->u.ext3_sb.s_mount_state & EXT3_ORPHAN_FS)) {
                        /* this inode is deleted */
                        brelse(bh);
                        goto bad_inode;
                }
                /* The only unlinked inodes we let through here have
                 * valid i_mode and are being read by the orphan
                 * recovery code: that's fine, we're about to complete
                 * the process of deleting those. */
        }
        inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
                                         * (for stat), not the fs block
                                         * size */
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        inode->i_version = ++event;
        inode->u.ext3_i.i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
        inode->u.ext3_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
        inode->u.ext3_i.i_frag_no = raw_inode->i_frag;
        inode->u.ext3_i.i_frag_size = raw_inode->i_fsize;
#endif
        inode->u.ext3_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        if (!S_ISREG(inode->i_mode)) {
                inode->u.ext3_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
        } else {
                inode->i_size |=
                        ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
        }
        inode->u.ext3_i.i_disksize = inode->i_size;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
#ifdef EXT3_PREALLOCATE
        inode->u.ext3_i.i_prealloc_count = 0;
#endif
        inode->u.ext3_i.i_block_group = iloc.block_group;

        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
         */
        for (block = 0; block < EXT3_N_BLOCKS; block++)
                inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block];
        INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);

        if (inode->i_ino == EXT3_ACL_IDX_INO ||
            inode->i_ino == EXT3_ACL_DATA_INO)
                /* Nothing to do */ ;
        else if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext3_file_inode_operations;
                inode->i_fop = &ext3_file_operations;
                inode->i_mapping->a_ops = &ext3_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ext3_dir_inode_operations;
                inode->i_fop = &ext3_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
                if (ext3_inode_is_fast_symlink(inode))
                        inode->i_op = &ext3_fast_symlink_inode_operations;
                else {
                        inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &ext3_aops;
                }
        } else
                init_special_inode(inode, inode->i_mode,
                                   le32_to_cpu(iloc.raw_inode->i_block[0]));
        brelse(iloc.bh);
        ext3_set_inode_flags(inode);
        return;

bad_inode:
        make_bad_inode(inode);
        return;
}
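
/*
 * Worked example (illustrative, not from the original source): a 32-bit
 * uid of 100000 is 0x186A0, stored on disk as i_uid_low = 0x86A0 and
 * i_uid_high = 0x0001.  Unless the fs is mounted with NO_UID32, the two
 * halves are recombined above as 0x86A0 | (0x0001 << 16) == 100000.
 */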

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 */

static int ext3_do_update_inode(handle_t *handle,
                                struct inode *inode,
                                struct ext3_iloc *iloc)
{
        struct ext3_inode *raw_inode = iloc->raw_inode;
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;

        if (handle) {
                BUFFER_TRACE(bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, bh);
                if (err)
                        goto out_brelse;
        }
        /* For fields not tracked in the in-memory inode,
         * initialise them to zero for new inodes. */
        if (EXT3_I(inode)->i_state & EXT3_STATE_NEW)
                memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        if (!(test_opt(inode->i_sb, NO_UID32))) {
                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
                raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
                /*
                 * Fix up interoperability with old kernels. Otherwise,
                 * old inodes get re-used with the upper 16 bits of the
                 * uid/gid intact.
                 */
                if (!inode->u.ext3_i.i_dtime) {
                        raw_inode->i_uid_high =
                                cpu_to_le16(high_16_bits(inode->i_uid));
                        raw_inode->i_gid_high =
                                cpu_to_le16(high_16_bits(inode->i_gid));
                } else {
                        raw_inode->i_uid_high = 0;
                        raw_inode->i_gid_high = 0;
                }
        } else {
                raw_inode->i_uid_low =
                        cpu_to_le16(fs_high2lowuid(inode->i_uid));
                raw_inode->i_gid_low =
                        cpu_to_le16(fs_high2lowgid(inode->i_gid));
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
        }
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le32(inode->u.ext3_i.i_disksize);
        raw_inode->i_atime = cpu_to_le32(inode->i_atime);
        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
        raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
        raw_inode->i_dtime = cpu_to_le32(inode->u.ext3_i.i_dtime);
        raw_inode->i_flags = cpu_to_le32(inode->u.ext3_i.i_flags);
#ifdef EXT3_FRAGMENTS
        raw_inode->i_faddr = cpu_to_le32(inode->u.ext3_i.i_faddr);
        raw_inode->i_frag = inode->u.ext3_i.i_frag_no;
        raw_inode->i_fsize = inode->u.ext3_i.i_frag_size;
#endif
        raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
        if (!S_ISREG(inode->i_mode)) {
                raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext3_i.i_dir_acl);
        } else {
                raw_inode->i_size_high =
                        cpu_to_le32(inode->u.ext3_i.i_disksize >> 32);
                if (inode->u.ext3_i.i_disksize > 0x7fffffffULL) {
                        struct super_block *sb = inode->i_sb;
                        if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
                            EXT3_SB(sb)->s_es->s_rev_level ==
                                        cpu_to_le32(EXT3_GOOD_OLD_REV)) {
                                /* If this is the first large file
                                 * created, add a flag to the superblock.
                                 */
                                err = ext3_journal_get_write_access(handle,
                                                sb->u.ext3_sb.s_sbh);
                                if (err)
                                        goto out_brelse;
                                ext3_update_dynamic_rev(sb);
                                EXT3_SET_RO_COMPAT_FEATURE(sb,
                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
                                sb->s_dirt = 1;
                                handle->h_sync = 1;
                                err = ext3_journal_dirty_metadata(handle,
                                                sb->u.ext3_sb.s_sbh);
                        }
                }
        }
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_block[0] =
                        cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
        else for (block = 0; block < EXT3_N_BLOCKS; block++)
                raw_inode->i_block[block] = inode->u.ext3_i.i_data[block];

        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
        rc = ext3_journal_dirty_metadata(handle, bh);
        if (!err)
                err = rc;
        EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;

out_brelse:
        brelse(bh);
        ext3_std_error(inode->i_sb, err);
        return err;
}
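
/*
 * Worked example (illustrative, not from the original source): for a
 * regular file with i_disksize = 0x123456789 (about 4.9GB), the split
 * above stores i_size = 0x23456789 and i_size_high = 0x1 on disk.
 * Because i_disksize exceeds 0x7fffffff, the LARGE_FILE ro-compat
 * feature flag is also set in the superblock the first time such a file
 * is written, so old kernels refuse to mount read-write rather than
 * corrupt it.
 */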

/*
 * ext3_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.  (The safe
 * ordering sets i_size first and calls mark_inode_dirty() last.)
 */
void ext3_write_inode(struct inode *inode, int wait)
{
        if (current->flags & PF_MEMALLOC)
                return;

        if (ext3_journal_current_handle()) {
                jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
                return;
        }

        if (!wait)
                return;

        ext3_force_commit(inode->i_sb);
}

/*
 * ext3_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * This is only needed for regular files.  rmdir() has its own path, and
 * we can never truncate a directory except on final unlink (at which
 * point i_nlink is zero so recovery is easy.)
 *
 * Called with the BKL.
 */

int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error, rc = 0;
        const unsigned int ia_valid = attr->ia_valid;

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
            (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
                error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
                if (error)
                        return error;
        }

        if (attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
                handle_t *handle;

                handle = ext3_journal_start(inode, 3);
                if (IS_ERR(handle)) {
                        error = PTR_ERR(handle);
                        goto err_out;
                }

                error = ext3_orphan_add(handle, inode);
                inode->u.ext3_i.i_disksize = attr->ia_size;
                rc = ext3_mark_inode_dirty(handle, inode);
                if (!error)
                        error = rc;
                ext3_journal_stop(handle, inode);
        }

        rc = inode_setattr(inode, attr);

        /* If inode_setattr's call to ext3_truncate failed to get a
         * transaction handle at all, we need to clean up the in-core
         * orphan list manually. */
        if (inode->i_nlink)
                ext3_orphan_del(NULL, inode);

err_out:
        ext3_std_error(inode->i_sb, error);
        if (!error)
                error = rc;
        return error;
}


/*
 * akpm: how many blocks doth make a writepage()?
 *
 * With N blocks per page, it may be:
 * N data blocks
 * 2 indirect blocks
 * 2 dindirect blocks
 * 1 tindirect block
 * N+5 bitmap blocks (from the above)
 * N+5 group descriptor summary blocks
 * 1 inode block
 * 1 superblock.
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
 *
 * With ordered or writeback data it's the same, less the N data blocks.
 *
 * If the inode's direct blocks can hold an integral number of pages then a
 * page cannot straddle two indirect blocks, and we can only touch one indirect
 * and dindirect block, and the "5" above becomes "3".
 *
 * This still overestimates under most circumstances.  If we were to pass the
 * start and end offsets in here as well we could do block_to_path() on each
 * block and work out the exact number of indirects which are touched.  Pah.
 */

int ext3_writepage_trans_blocks(struct inode *inode)
{
        int bpp = ext3_journal_blocks_per_page(inode);
        int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
        int ret;

        if (ext3_should_journal_data(inode))
                ret = 3 * (bpp + indirects) + 2;
        else
                ret = 2 * (bpp + indirects) + 2;

#ifdef CONFIG_QUOTA
        ret += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

        return ret;
}
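
/*
 * Worked example (illustrative, not from the original source): with 1K
 * blocks and 4K pages, bpp == 4.  EXT3_NDIR_BLOCKS is 12 and 12 % 4 == 0,
 * so a page cannot straddle two indirect blocks and indirects == 3.  A
 * data-journalled inode then reserves 3 * (4 + 3) + 2 == 23 blocks, plus
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS when quota is configured; ordered or
 * writeback mode reserves 2 * (4 + 3) + 2 == 16.
 */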

int
ext3_mark_iloc_dirty(handle_t *handle,
                     struct inode *inode,
                     struct ext3_iloc *iloc)
{
        int err = 0;

        if (handle) {
                /* the do_update_inode consumes one bh->b_count */
                atomic_inc(&iloc->bh->b_count);
                err = ext3_do_update_inode(handle, inode, iloc);
                /* ext3_do_update_inode() does journal_dirty_metadata */
                brelse(iloc->bh);
        } else {
                printk(KERN_EMERG "%s: called with no handle!\n", __FUNCTION__);
        }
        return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
                         struct ext3_iloc *iloc)
{
        int err = 0;
        if (handle) {
                err = ext3_get_inode_loc(inode, iloc);
                if (!err) {
                        BUFFER_TRACE(iloc->bh, "get_write_access");
                        err = ext3_journal_get_write_access(handle, iloc->bh);
                        if (err) {
                                brelse(iloc->bh);
                                iloc->bh = NULL;
                        }
                }
        }
        ext3_std_error(inode->i_sb, err);
        return err;
}

/*
 * akpm: What we do here is to mark the in-core inode as clean
 * with respect to inode dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
        struct ext3_iloc iloc;
        int err;

        err = ext3_reserve_inode_write(handle, inode, &iloc);
        if (!err)
                err = ext3_mark_iloc_dirty(handle, inode, &iloc);
        return err;
}

/*
 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext3_dirty_inode(struct inode *inode)
{
        handle_t *current_handle = ext3_journal_current_handle();
        handle_t *handle;

        lock_kernel();
        handle = ext3_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
        if (current_handle &&
                current_handle->h_transaction != handle->h_transaction) {
                /* This task has a transaction open against a different fs */
                printk(KERN_EMERG "%s: transactions do not match!\n",
                       __FUNCTION__);
        } else {
                jbd_debug(5, "marking dirty.  outer handle=%p\n",
                          current_handle);
                ext3_mark_inode_dirty(handle, inode);
        }
        ext3_journal_stop(handle, inode);
out:
        unlock_kernel();
}

#ifdef AKPM
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static inline int
ext3_pin_inode(handle_t *handle, struct inode *inode)
{
        struct ext3_iloc iloc;

        int err = 0;
        if (handle) {
                err = ext3_get_inode_loc(inode, &iloc);
                if (!err) {
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = journal_get_write_access(handle, iloc.bh);
                        if (!err)
                                err = ext3_journal_dirty_metadata(handle,
                                                                  iloc.bh);
                        brelse(iloc.bh);
                }
        }
        ext3_std_error(inode->i_sb, err);
        return err;
}
#endif

int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
        journal_t *journal;
        handle_t *handle;
        int err;

        /*
         * We have to be very careful here: changing a data block's
         * journaling status dynamically is dangerous.  If we write a
         * data block to the journal, change the status and then delete
         * that block, we risk forgetting to revoke the old log record
         * from the journal and so a subsequent replay can corrupt data.
         * So, first we make sure that the journal is empty and that
         * nobody is changing anything.
         */

        journal = EXT3_JOURNAL(inode);
        if (is_journal_aborted(journal) || IS_RDONLY(inode))
                return -EROFS;

        journal_lock_updates(journal);
        journal_flush(journal);

        /*
         * OK, there are no updates running now, and all cached data is
         * synced to disk.  We are now in a completely consistent state
         * which doesn't have anything in the journal, and we know that
         * no filesystem updates are running, so it is safe to modify
         * the inode's in-core data-journaling state flag now.
         */

        if (val)
                inode->u.ext3_i.i_flags |= EXT3_JOURNAL_DATA_FL;
        else
                inode->u.ext3_i.i_flags &= ~EXT3_JOURNAL_DATA_FL;

        journal_unlock_updates(journal);

        /* Finally we can mark the inode as dirty. */

        handle = ext3_journal_start(inode, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = ext3_mark_inode_dirty(handle, inode);
        handle->h_sync = 1;
        ext3_journal_stop(handle, inode);
        ext3_std_error(inode->i_sb, err);

        return err;
}


/*
 * ext3_aops_journal_start().
 *
 * <This function died, but the comment lives on>
 *
 * We need to take the inode semaphore *outside* the
 * journal_start/journal_stop.  Otherwise, a different task could do a
 * wait_for_commit() while holding ->i_sem, which deadlocks.  The rule
 * is: transaction open/closes are considered to be a locking operation
 * and they nest *inside* ->i_sem.
 * ----------------------------------------------------------------------------
 * Possible problem:
 *	ext3_file_write()
 *	-> generic_file_write()
 *	   -> __alloc_pages()
 *	      -> page_launder()
 *	         -> ext3_writepage()
 *
 * And the writepage can be on a different fs while we have a
 * transaction open against this one!  Bad.
 *
 * I tried making the task PF_MEMALLOC here, but that simply results in
 * 0-order allocation failures passed back to generic_file_write().
 * Instead, we rely on the reentrancy protection in ext3_writepage().
 * ----------------------------------------------------------------------------
 * When we do the journal_start() here we don't really need to reserve
 * any blocks - we won't need any until we hit ext3_prepare_write(),
 * which does all the needed journal extending.  However!  There is a
 * problem with quotas:
 *
 * Thread 1:
 * sys_sync
 * ->sync_dquots
 *   ->commit_dquot
 *     ->lock_dquot
 *     ->write_dquot
 *       ->ext3_file_write
 *         ->journal_start
 *         ->ext3_prepare_write
 *           ->journal_extend
 *           ->journal_start
 * Thread 2:
 * ext3_create (for example)
 * ->ext3_new_inode
 *   ->dquot_initialize
 *     ->lock_dquot
 *
 * Deadlock.  Thread 1's journal_start blocks because thread 2 has a
 * transaction open.  Thread 2's transaction will never close because
 * thread 2 is stuck waiting for the dquot lock.
 *
 * So.  We must ensure that thread 1 *never* needs to extend the journal
 * for quota writes.  We do that by reserving enough journal blocks
 * here, in ext3_aops_journal_start() to ensure that the forthcoming "see if we
 * need to extend" test in ext3_prepare_write() succeeds.
 */