// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
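
/*
 * Illustrative (hypothetical) caller pattern, not taken from this file:
 * code walking the tree under i_data_sem would typically do
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed,
 *					  restart_credits, revoke_credits);
 *	if (err < 0)
 *		goto out;	(fatal error)
 *	if (err > 0)
 *		goto again;	(transaction restarted: i_data_sem was
 *				 dropped, so revalidate the extent path)
 */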

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extent updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
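
/*
 * Worked example with illustrative numbers: if the nearest extent in the
 * path maps logical block 100 to physical block 5000 and we are allocating
 * for logical block 110, the goal is 5000 + (110 - 100) = 5010; for
 * logical block 95 it is 5000 - (100 - 95) = 4995.
 */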

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
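
/*
 * Worked example (assuming a 4 KiB block size): struct ext4_extent_header,
 * struct ext4_extent and struct ext4_extent_idx are all 12 bytes, so an
 * external tree block holds (4096 - 12) / 12 = 340 entries, while the
 * 60-byte i_data root holds (60 - 12) / 12 = 4 entries.
 */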

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
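
/*
 * Illustrative example: for a leaf with extents [0, len 10] and
 * [20, len 10], the loop above caches ranges 0-9 and 20-29 as
 * written/unwritten and, since prev (10) != lblk (20), also caches
 * the hole 10-19 in the extent status tree.
 */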

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, "  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, "  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, "  []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
				    <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
				    <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
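
/*
 * Illustrative example: searching for block 25 in a leaf whose extents
 * start at blocks 0, 10 and 30 terminates with l just past the entry
 * at 10, so p_ext = l - 1 selects the extent starting at 10 -- the
 * last entry whose ee_block is <= the target block.
 */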

void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_free_ext_path(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only. The index won't be inserted and the
	 * tree will remain in a consistent state. The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the blocks
	 * on failure.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug(inode, "create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		neh->eh_generation = 0;
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock, goal = 0;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	int err = 0;
	size_t ext_size = 0;

	/* Try to prepend new index to old one */
	if (ext_depth(inode))
		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
	if (goal > le32_to_cpu(es->s_first_data_block)) {
		flags |= EXT4_MB_HINT_TRY_GOAL;
		goal--;
	} else
		goal = ext4_inode_to_goal_block(inode);
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	ext_size = sizeof(EXT4_I(inode)->i_data);
	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
	/* zero out unused area in the extent block */
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	set_buffer_verified(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	err = ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
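
/*
 * Illustrative before/after: with depth 1, the root in i_data holds index
 * entries pointing at leaf blocks; after growing, the old root content
 * lives in the newly allocated block and i_data holds a single index
 * entry pointing at it, with the root's eh_depth bumped from 1 to 2.
 */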

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int mb_flags,
				    unsigned int gb_flags,
				    struct ext4_ext_path **ppath,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *path = *ppath;
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use an already allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
		if (err)
			goto out;

		/* refill path */
		path = ext4_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    ppath, gb_flags);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and return it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
				  depth);
				return -EFSCORRUPTED;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
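
/*
 * Illustrative example: if the path extent covers logical blocks
 * 200..209 at physical blocks 7000..7009 and *logical is 215 (a hole
 * past the extent), the function returns with *logical = 209 and
 * *phys = 7009, the last block of the closest extent to the left.
 */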

/*
 * Search the closest allocated block to the right for *logical
 * and return it at @logical + its physical address at @phys.
 * If no such block exists, return 0 and set @phys to 0. Otherwise
 * return 1, which means we found an allocated block and ret_ex is
 * valid. Or return a (< 0) error code.
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent *ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EFSCORRUPTED;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EFSCORRUPTED;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EFSCORRUPTED;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EFSCORRUPTED;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	while (++depth < path->p_depth) {
		/* subtract from p_depth to get proper eh_depth */
		bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		eh = ext_block_hdr(bh);
		ix = EXT_FIRST_INDEX(eh);
		put_bh(bh);
	}

	bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	eh = ext_block_hdr(bh);
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	if (ret_ex)
		*ret_ex = *ex;
	if (bh)
		put_bh(bh);
	return 1;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		struct ext4_ext_path *p = &path[depth];

		if (depth == path->p_depth) {
			/* leaf */
			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
				return le32_to_cpu(p->p_ext[1].ee_block);
		} else {
			/* index */
			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
				return le32_to_cpu(p->p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				    struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EFSCORRUPTED;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int ext4_can_extents_be_merged(struct inode *inode,
				      struct ext4_extent *ex1,
				      struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len;

	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
		return 0;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
		return 0;

	if (ext4_ext_is_unwritten(ex1) &&
	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
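
/*
 * Illustrative example: extents (lblk 100, len 8, pblk 500) and
 * (lblk 108, len 4, pblk 508) have the same written/unwritten state and
 * are both logically and physically contiguous, so they are mergeable
 * into (lblk 100, len 12, pblk 500).
 */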
1793
1794 /*
1795 * This function tries to merge the "ex" extent to the next extent in the tree.
1796 * It always tries to merge towards right. If you want to merge towards
1797 * left, pass "ex - 1" as argument instead of "ex".
1798 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1799 * 1 if they got merged.
1800 */
ext4_ext_try_to_merge_right(struct inode * inode,struct ext4_ext_path * path,struct ext4_extent * ex)1801 static int ext4_ext_try_to_merge_right(struct inode *inode,
1802 struct ext4_ext_path *path,
1803 struct ext4_extent *ex)
1804 {
1805 struct ext4_extent_header *eh;
1806 unsigned int depth, len;
1807 int merge_done = 0, unwritten;
1808
1809 depth = ext_depth(inode);
1810 BUG_ON(path[depth].p_hdr == NULL);
1811 eh = path[depth].p_hdr;
1812
1813 while (ex < EXT_LAST_EXTENT(eh)) {
1814 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1815 break;
1816 /* merge with next extent! */
1817 unwritten = ext4_ext_is_unwritten(ex);
1818 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1819 + ext4_ext_get_actual_len(ex + 1));
1820 if (unwritten)
1821 ext4_ext_mark_unwritten(ex);
1822
1823 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1824 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1825 * sizeof(struct ext4_extent);
1826 memmove(ex + 1, ex + 2, len);
1827 }
1828 le16_add_cpu(&eh->eh_entries, -1);
1829 merge_done = 1;
1830 WARN_ON(eh->eh_entries == 0);
1831 if (!eh->eh_entries)
1832 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1833 }
1834
1835 return merge_done;
1836 }
1837
1838 /*
1839 * This function does a very simple check to see if we can collapse
1840 * an extent tree with a single extent tree leaf block into the inode.
1841 */
ext4_ext_try_to_merge_up(handle_t * handle,struct inode * inode,struct ext4_ext_path * path)1842 static void ext4_ext_try_to_merge_up(handle_t *handle,
1843 struct inode *inode,
1844 struct ext4_ext_path *path)
1845 {
1846 size_t s;
1847 unsigned max_root = ext4_ext_space_root(inode, 0);
1848 ext4_fsblk_t blk;
1849
1850 if ((path[0].p_depth != 1) ||
1851 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1852 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1853 return;
1854
1855 /*
1856 * We need to modify the block allocation bitmap and the block
1857 * group descriptor to release the extent tree block. If we
1858 * can't get the journal credits, give up.
1859 */
1860 if (ext4_journal_extend(handle, 2,
1861 ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1862 return;
1863
1864 /*
1865 * Copy the extent data up to the inode
1866 */
1867 blk = ext4_idx_pblock(path[0].p_idx);
1868 s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1869 sizeof(struct ext4_extent_idx);
1870 s += sizeof(struct ext4_extent_header);
1871
1872 path[1].p_maxdepth = path[0].p_maxdepth;
1873 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1874 path[0].p_depth = 0;
1875 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1876 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1877 path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1878
1879 brelse(path[1].p_bh);
1880 ext4_free_blocks(handle, inode, NULL, blk, 1,
1881 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1882 }
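
/*
 * Editorial note with a standalone sketch (assumptions, not kernel code):
 * the byte count "s" above is computed with sizeof(struct ext4_extent_idx)
 * even though a leaf holds extents; this works because both on-disk records
 * are 12 bytes. A toy model of the size computation:
 *
 *	#include <stddef.h>
 *
 *	struct toy_hdr    { unsigned short entries, max, depth; };
 *	struct toy_extent { unsigned int lblk, len; unsigned long pblk; };
 *
 *	// Bytes to copy when pulling a lone leaf up into the root:
 *	// the header plus every in-use entry.
 *	static size_t bytes_to_copy(const struct toy_hdr *leaf_hdr)
 *	{
 *		return sizeof(*leaf_hdr) +
 *		       leaf_hdr->entries * sizeof(struct toy_extent);
 *	}
 */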
1883
1884 /*
1885 * This function tries to merge the @ex extent to neighbours in the tree, then
1886 * tries to collapse the extent tree into the inode.
1887 */
1888 static void ext4_ext_try_to_merge(handle_t *handle,
1889 struct inode *inode,
1890 struct ext4_ext_path *path,
1891 struct ext4_extent *ex)
1892 {
1893 struct ext4_extent_header *eh;
1894 unsigned int depth;
1895 int merge_done = 0;
1896
1897 depth = ext_depth(inode);
1898 BUG_ON(path[depth].p_hdr == NULL);
1899 eh = path[depth].p_hdr;
1900
1901 if (ex > EXT_FIRST_EXTENT(eh))
1902 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1903
1904 if (!merge_done)
1905 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1906
1907 ext4_ext_try_to_merge_up(handle, inode, path);
1908 }
1909
1910 /*
1911 * check if a portion of the "newext" extent overlaps with an
1912 * existing extent.
1913 *
1914 * If an overlap is discovered, it updates the length of newext
1915 * such that there will be no overlap, and then returns 1.
1916 * If there is no overlap found, it returns 0.
1917 */
1918 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1919 struct inode *inode,
1920 struct ext4_extent *newext,
1921 struct ext4_ext_path *path)
1922 {
1923 ext4_lblk_t b1, b2;
1924 unsigned int depth, len1;
1925 unsigned int ret = 0;
1926
1927 b1 = le32_to_cpu(newext->ee_block);
1928 len1 = ext4_ext_get_actual_len(newext);
1929 depth = ext_depth(inode);
1930 if (!path[depth].p_ext)
1931 goto out;
1932 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1933
1934 /*
1935 * get the next allocated block if the extent in the path
1936 * is before the requested block(s)
1937 */
1938 if (b2 < b1) {
1939 b2 = ext4_ext_next_allocated_block(path);
1940 if (b2 == EXT_MAX_BLOCKS)
1941 goto out;
1942 b2 = EXT4_LBLK_CMASK(sbi, b2);
1943 }
1944
1945 /* check for wrap through zero on extent logical start block*/
1946 if (b1 + len1 < b1) {
1947 len1 = EXT_MAX_BLOCKS - b1;
1948 newext->ee_len = cpu_to_le16(len1);
1949 ret = 1;
1950 }
1951
1952 /* check for overlap */
1953 if (b1 + len1 > b2) {
1954 newext->ee_len = cpu_to_le16(b2 - b1);
1955 ret = 1;
1956 }
1957 out:
1958 return ret;
1959 }
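
/*
 * Editorial sketch (not kernel code): the two trims performed above in a
 * standalone form. "next_alloc" stands in for the first block of the next
 * allocated extent (cf. ext4_ext_next_allocated_block()); the unsigned
 * wrap test mirrors the "b1 + len1 < b1" check. All names are assumptions.
 *
 *	#include <stdbool.h>
 *
 *	#define TOY_MAX_BLOCKS 0xffffffffU	// cf. EXT_MAX_BLOCKS
 *
 *	// Trim *len so that [start, start + *len) neither wraps through
 *	// zero nor reaches next_alloc; returns true if *len was reduced.
 *	static bool trim_overlap(unsigned int start, unsigned int *len,
 *				 unsigned int next_alloc)
 *	{
 *		bool trimmed = false;
 *
 *		if (start + *len < start) {	// wrap through zero
 *			*len = TOY_MAX_BLOCKS - start;
 *			trimmed = true;
 *		}
 *		if (start + *len > next_alloc) {	// overlap on the right
 *			*len = next_alloc - start;
 *			trimmed = true;
 *		}
 *		return trimmed;
 *	}
 */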
1960
1961 /*
1962 * ext4_ext_insert_extent:
1963 * tries to merge the requested extent into the existing extent, or
1964 * inserts the requested extent as a new one into the tree,
1965 * creating a new leaf in the no-space case.
1966 */
1967 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1968 struct ext4_ext_path **ppath,
1969 struct ext4_extent *newext, int gb_flags)
1970 {
1971 struct ext4_ext_path *path = *ppath;
1972 struct ext4_extent_header *eh;
1973 struct ext4_extent *ex, *fex;
1974 struct ext4_extent *nearex; /* nearest extent */
1975 struct ext4_ext_path *npath = NULL;
1976 int depth, len, err;
1977 ext4_lblk_t next;
1978 int mb_flags = 0, unwritten;
1979
1980 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1981 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1982 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1983 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1984 return -EFSCORRUPTED;
1985 }
1986 depth = ext_depth(inode);
1987 ex = path[depth].p_ext;
1988 eh = path[depth].p_hdr;
1989 if (unlikely(path[depth].p_hdr == NULL)) {
1990 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1991 return -EFSCORRUPTED;
1992 }
1993
1994 /* try to insert block into found extent and return */
1995 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1996
1997 /*
1998 * Try to see whether we should rather test the extent to the
1999 * right of ex, or to the left of ex. This is because
2000 * ext4_find_extent() can return either the extent on the
2001 * left, or on the right of the searched position. This
2002 * will make merging more effective.
2003 */
2004 if (ex < EXT_LAST_EXTENT(eh) &&
2005 (le32_to_cpu(ex->ee_block) +
2006 ext4_ext_get_actual_len(ex) <
2007 le32_to_cpu(newext->ee_block))) {
2008 ex += 1;
2009 goto prepend;
2010 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2011 (le32_to_cpu(newext->ee_block) +
2012 ext4_ext_get_actual_len(newext) <
2013 le32_to_cpu(ex->ee_block)))
2014 ex -= 1;
2015
2016 /* Try to append newext to ex */
2017 if (ext4_can_extents_be_merged(inode, ex, newext)) {
2018 ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
2019 "(from %llu)\n",
2020 ext4_ext_is_unwritten(newext),
2021 ext4_ext_get_actual_len(newext),
2022 le32_to_cpu(ex->ee_block),
2023 ext4_ext_is_unwritten(ex),
2024 ext4_ext_get_actual_len(ex),
2025 ext4_ext_pblock(ex));
2026 err = ext4_ext_get_access(handle, inode,
2027 path + depth);
2028 if (err)
2029 return err;
2030 unwritten = ext4_ext_is_unwritten(ex);
2031 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2032 + ext4_ext_get_actual_len(newext));
2033 if (unwritten)
2034 ext4_ext_mark_unwritten(ex);
2035 nearex = ex;
2036 goto merge;
2037 }
2038
2039 prepend:
2040 /* Try to prepend newext to ex */
2041 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2042 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
2043 "(from %llu)\n",
2044 le32_to_cpu(newext->ee_block),
2045 ext4_ext_is_unwritten(newext),
2046 ext4_ext_get_actual_len(newext),
2047 le32_to_cpu(ex->ee_block),
2048 ext4_ext_is_unwritten(ex),
2049 ext4_ext_get_actual_len(ex),
2050 ext4_ext_pblock(ex));
2051 err = ext4_ext_get_access(handle, inode,
2052 path + depth);
2053 if (err)
2054 return err;
2055
2056 unwritten = ext4_ext_is_unwritten(ex);
2057 ex->ee_block = newext->ee_block;
2058 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2059 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2060 + ext4_ext_get_actual_len(newext));
2061 if (unwritten)
2062 ext4_ext_mark_unwritten(ex);
2063 nearex = ex;
2064 goto merge;
2065 }
2066 }
2067
2068 depth = ext_depth(inode);
2069 eh = path[depth].p_hdr;
2070 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2071 goto has_space;
2072
2073 /* probably next leaf has space for us? */
2074 fex = EXT_LAST_EXTENT(eh);
2075 next = EXT_MAX_BLOCKS;
2076 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2077 next = ext4_ext_next_leaf_block(path);
2078 if (next != EXT_MAX_BLOCKS) {
2079 ext_debug(inode, "next leaf block - %u\n", next);
2080 BUG_ON(npath != NULL);
2081 npath = ext4_find_extent(inode, next, NULL, gb_flags);
2082 if (IS_ERR(npath))
2083 return PTR_ERR(npath);
2084 BUG_ON(npath->p_depth != path->p_depth);
2085 eh = npath[depth].p_hdr;
2086 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2087 ext_debug(inode, "next leaf isn't full(%d)\n",
2088 le16_to_cpu(eh->eh_entries));
2089 path = npath;
2090 goto has_space;
2091 }
2092 ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2093 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2094 }
2095
2096 /*
2097 * There is no free space in the found leaf.
2098 * We're gonna add a new leaf in the tree.
2099 */
2100 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2101 mb_flags |= EXT4_MB_USE_RESERVED;
2102 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2103 ppath, newext);
2104 if (err)
2105 goto cleanup;
2106 depth = ext_depth(inode);
2107 eh = path[depth].p_hdr;
2108
2109 has_space:
2110 nearex = path[depth].p_ext;
2111
2112 err = ext4_ext_get_access(handle, inode, path + depth);
2113 if (err)
2114 goto cleanup;
2115
2116 if (!nearex) {
2117 /* there is no extent in this leaf, create first one */
2118 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2119 le32_to_cpu(newext->ee_block),
2120 ext4_ext_pblock(newext),
2121 ext4_ext_is_unwritten(newext),
2122 ext4_ext_get_actual_len(newext));
2123 nearex = EXT_FIRST_EXTENT(eh);
2124 } else {
2125 if (le32_to_cpu(newext->ee_block)
2126 > le32_to_cpu(nearex->ee_block)) {
2127 /* Insert after */
2128 ext_debug(inode, "insert %u:%llu:[%d]%d after: "
2129 "nearest %p\n",
2130 le32_to_cpu(newext->ee_block),
2131 ext4_ext_pblock(newext),
2132 ext4_ext_is_unwritten(newext),
2133 ext4_ext_get_actual_len(newext),
2134 nearex);
2135 nearex++;
2136 } else {
2137 /* Insert before */
2138 BUG_ON(newext->ee_block == nearex->ee_block);
2139 ext_debug(inode, "insert %u:%llu:[%d]%d before: "
2140 "nearest %p\n",
2141 le32_to_cpu(newext->ee_block),
2142 ext4_ext_pblock(newext),
2143 ext4_ext_is_unwritten(newext),
2144 ext4_ext_get_actual_len(newext),
2145 nearex);
2146 }
2147 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2148 if (len > 0) {
2149 ext_debug(inode, "insert %u:%llu:[%d]%d: "
2150 "move %d extents from 0x%p to 0x%p\n",
2151 le32_to_cpu(newext->ee_block),
2152 ext4_ext_pblock(newext),
2153 ext4_ext_is_unwritten(newext),
2154 ext4_ext_get_actual_len(newext),
2155 len, nearex, nearex + 1);
2156 memmove(nearex + 1, nearex,
2157 len * sizeof(struct ext4_extent));
2158 }
2159 }
2160
2161 le16_add_cpu(&eh->eh_entries, 1);
2162 path[depth].p_ext = nearex;
2163 nearex->ee_block = newext->ee_block;
2164 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2165 nearex->ee_len = newext->ee_len;
2166
2167 merge:
2168 /* try to merge extents */
2169 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2170 ext4_ext_try_to_merge(handle, inode, path, nearex);
2171
2172
2173 /* time to correct all indexes above */
2174 err = ext4_ext_correct_indexes(handle, inode, path);
2175 if (err)
2176 goto cleanup;
2177
2178 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2179
2180 cleanup:
2181 ext4_free_ext_path(npath);
2182 return err;
2183 }
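
/*
 * Editorial sketch (not kernel code): the "has_space" insertion above in
 * miniature - find the slot in the sorted leaf, shift the tail right with
 * memmove(), and bump the entry count. The fixed-capacity leaf and toy
 * type are assumptions for illustration.
 *
 *	#include <string.h>
 *
 *	struct toy_extent { unsigned int lblk, len; unsigned long pblk; };
 *
 *	// Insert *ne into the sorted leaf; nr is the current entry count
 *	// and max the capacity. Returns the new count, or -1 when full
 *	// (the caller would then split or add a leaf, as above).
 *	static int leaf_insert(struct toy_extent *leaf, int nr, int max,
 *			       const struct toy_extent *ne)
 *	{
 *		int i = 0;
 *
 *		if (nr >= max)
 *			return -1;
 *		while (i < nr && leaf[i].lblk < ne->lblk)
 *			i++;
 *		memmove(&leaf[i + 1], &leaf[i], (nr - i) * sizeof(*leaf));
 *		leaf[i] = *ne;
 *		return nr + 1;
 *	}
 */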
2184
2185 static int ext4_fill_es_cache_info(struct inode *inode,
2186 ext4_lblk_t block, ext4_lblk_t num,
2187 struct fiemap_extent_info *fieinfo)
2188 {
2189 ext4_lblk_t next, end = block + num - 1;
2190 struct extent_status es;
2191 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2192 unsigned int flags;
2193 int err;
2194
2195 while (block <= end) {
2196 next = 0;
2197 flags = 0;
2198 if (!ext4_es_lookup_extent(inode, block, &next, &es))
2199 break;
2200 if (ext4_es_is_unwritten(&es))
2201 flags |= FIEMAP_EXTENT_UNWRITTEN;
2202 if (ext4_es_is_delayed(&es))
2203 flags |= (FIEMAP_EXTENT_DELALLOC |
2204 FIEMAP_EXTENT_UNKNOWN);
2205 if (ext4_es_is_hole(&es))
2206 flags |= EXT4_FIEMAP_EXTENT_HOLE;
2207 if (next == 0)
2208 flags |= FIEMAP_EXTENT_LAST;
2209 if (flags & (FIEMAP_EXTENT_DELALLOC|
2210 EXT4_FIEMAP_EXTENT_HOLE))
2211 es.es_pblk = 0;
2212 else
2213 es.es_pblk = ext4_es_pblock(&es);
2214 err = fiemap_fill_next_extent(fieinfo,
2215 (__u64)es.es_lblk << blksize_bits,
2216 (__u64)es.es_pblk << blksize_bits,
2217 (__u64)es.es_len << blksize_bits,
2218 flags);
2219 if (next == 0)
2220 break;
2221 block = next;
2222 if (err < 0)
2223 return err;
2224 if (err == 1)
2225 return 0;
2226 }
2227 return 0;
2228 }
2229
2230
2231 /*
2232 * ext4_ext_determine_hole - determine hole around given block
2233 * @inode: inode we lookup in
2234 * @path: path in extent tree to @lblk
2235 * @lblk: pointer to logical block around which we want to determine hole
2236 *
2237 * Determine hole length (and start if easily possible) around given logical
2238 * block. We don't try too hard to find the beginning of the hole, but when
2239 * @path happens to point to the extent just before @lblk, we provide it.
2240 *
2241 * The function returns the length of a hole starting at @lblk. We update @lblk
2242 * to the beginning of the hole if we managed to find it.
2243 */
2244 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
2245 struct ext4_ext_path *path,
2246 ext4_lblk_t *lblk)
2247 {
2248 int depth = ext_depth(inode);
2249 struct ext4_extent *ex;
2250 ext4_lblk_t len;
2251
2252 ex = path[depth].p_ext;
2253 if (ex == NULL) {
2254 /* there is no extent yet, so gap is [0;-] */
2255 *lblk = 0;
2256 len = EXT_MAX_BLOCKS;
2257 } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2258 len = le32_to_cpu(ex->ee_block) - *lblk;
2259 } else if (*lblk >= le32_to_cpu(ex->ee_block)
2260 + ext4_ext_get_actual_len(ex)) {
2261 ext4_lblk_t next;
2262
2263 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2264 next = ext4_ext_next_allocated_block(path);
2265 BUG_ON(next == *lblk);
2266 len = next - *lblk;
2267 } else {
2268 BUG();
2269 }
2270 return len;
2271 }
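
/*
 * Editorial sketch (not kernel code): the three cases above against a plain
 * array instead of a tree path. "next_alloc" stands in for
 * ext4_ext_next_allocated_block(); the caller guarantees *lblk is not
 * inside the extent, as the BUG() above does. All names are assumptions.
 *
 *	#define TOY_MAX_BLOCKS 0xffffffffU	// cf. EXT_MAX_BLOCKS
 *
 *	struct toy_extent { unsigned int lblk, len; };
 *
 *	// Given the extent at or before *lblk (or NULL if none), return the
 *	// hole length and move *lblk to the hole's start when it is known.
 *	static unsigned int hole_around(const struct toy_extent *ex,
 *					unsigned int *lblk,
 *					unsigned int next_alloc)
 *	{
 *		if (!ex) {			// no extents: hole is [0, max)
 *			*lblk = 0;
 *			return TOY_MAX_BLOCKS;
 *		}
 *		if (*lblk < ex->lblk)		// hole ends at this extent
 *			return ex->lblk - *lblk;
 *		// *lblk is past the extent: hole runs to the next allocation
 *		*lblk = ex->lblk + ex->len;
 *		return next_alloc - *lblk;
 *	}
 */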
2272
2273 /*
2274 * ext4_ext_put_gap_in_cache:
2275 * calculate boundaries of the gap that the requested block fits into
2276 * and cache this gap
2277 */
2278 static void
2279 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
2280 ext4_lblk_t hole_len)
2281 {
2282 struct extent_status es;
2283
2284 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
2285 hole_start + hole_len - 1, &es);
2286 if (es.es_len) {
2287 /* Is there a delayed extent containing lblock? */
2288 if (es.es_lblk <= hole_start)
2289 return;
2290 hole_len = min(es.es_lblk - hole_start, hole_len);
2291 }
2292 ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
2293 ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
2294 EXTENT_STATUS_HOLE);
2295 }
2296
2297 /*
2298 * ext4_ext_rm_idx:
2299 * removes index from the index block.
2300 */
2301 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2302 struct ext4_ext_path *path, int depth)
2303 {
2304 int err;
2305 ext4_fsblk_t leaf;
2306
2307 /* free index block */
2308 depth--;
2309 path = path + depth;
2310 leaf = ext4_idx_pblock(path->p_idx);
2311 if (unlikely(path->p_hdr->eh_entries == 0)) {
2312 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2313 return -EFSCORRUPTED;
2314 }
2315 err = ext4_ext_get_access(handle, inode, path);
2316 if (err)
2317 return err;
2318
2319 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2320 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2321 len *= sizeof(struct ext4_extent_idx);
2322 memmove(path->p_idx, path->p_idx + 1, len);
2323 }
2324
2325 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2326 err = ext4_ext_dirty(handle, inode, path);
2327 if (err)
2328 return err;
2329 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2330 trace_ext4_ext_rm_idx(inode, leaf);
2331
2332 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2333 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2334
2335 while (--depth >= 0) {
2336 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2337 break;
2338 path--;
2339 err = ext4_ext_get_access(handle, inode, path);
2340 if (err)
2341 break;
2342 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2343 err = ext4_ext_dirty(handle, inode, path);
2344 if (err)
2345 break;
2346 }
2347 return err;
2348 }
2349
2350 /*
2351 * ext4_ext_calc_credits_for_single_extent:
2352 * This routine returns the maximum number of credits needed to insert
2353 * an extent into the extent tree.
2354 * When passing the actual path, the caller should calculate credits
2355 * under i_data_sem.
2356 */
2357 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2358 struct ext4_ext_path *path)
2359 {
2360 if (path) {
2361 int depth = ext_depth(inode);
2362 int ret = 0;
2363
2364 /* probably there is space in leaf? */
2365 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2366 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2367
2368 /*
2369 * There is some space in the leaf, so there is
2370 * no need to account for the leaf block credit.
2371 *
2372 * Bitmaps, block group descriptor blocks
2373 * and other metadata blocks still need to be
2374 * accounted for.
2375 */
2376 /* 1 bitmap, 1 block group descriptor */
2377 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2378 return ret;
2379 }
2380 }
2381
2382 return ext4_chunk_trans_blocks(inode, nrblocks);
2383 }
2384
2385 /*
2386 * How many index/leaf blocks need to change/allocate to add @extents extents?
2387 *
2388 * If we add a single extent, then in the worst case, each tree level
2389 * index/leaf needs to be changed in case the tree splits.
2390 *
2391 * If more extents are inserted, they could cause the whole tree to split
2392 * more than once, but this is really rare.
2393 */
2394 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2395 {
2396 int index;
2397 int depth;
2398
2399 /* If we are converting inline data, only one block is needed here. */
2400 if (ext4_has_inline_data(inode))
2401 return 1;
2402
2403 depth = ext_depth(inode);
2404
2405 if (extents <= 1)
2406 index = depth * 2;
2407 else
2408 index = depth * 3;
2409
2410 return index;
2411 }
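
/*
 * Editorial sketch (not kernel code): the worst-case budget above as a
 * standalone helper; the names are assumptions. Roughly: a single insert
 * can split each level once, touching the existing index/leaf block and
 * allocating a sibling (two blocks per level); several inserts can, rarely,
 * split a level more than once, so a third block per level is budgeted.
 *
 *	static int index_blocks_worst_case(int depth, int extents)
 *	{
 *		return extents <= 1 ? depth * 2 : depth * 3;
 *	}
 */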
2412
2413 static inline int get_default_free_blocks_flags(struct inode *inode)
2414 {
2415 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2416 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2417 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2418 else if (ext4_should_journal_data(inode))
2419 return EXT4_FREE_BLOCKS_FORGET;
2420 return 0;
2421 }
2422
2423 /*
2424 * ext4_rereserve_cluster - increment the reserved cluster count when
2425 * freeing a cluster with a pending reservation
2426 *
2427 * @inode - file containing the cluster
2428 * @lblk - logical block in cluster to be reserved
2429 *
2430 * Increments the reserved cluster count and adjusts quota in a bigalloc
2431 * file system when freeing a partial cluster containing at least one
2432 * delayed and unwritten block. A partial cluster meeting that
2433 * requirement will have a pending reservation. If so, the
2434 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
2435 * defer reserved and allocated space accounting to a subsequent call
2436 * to this function.
2437 */
2438 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2439 {
2440 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2441 struct ext4_inode_info *ei = EXT4_I(inode);
2442
2443 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2444
2445 spin_lock(&ei->i_block_reservation_lock);
2446 ei->i_reserved_data_blocks++;
2447 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2448 spin_unlock(&ei->i_block_reservation_lock);
2449
2450 percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2451 ext4_remove_pending(inode, lblk);
2452 }
2453
2454 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2455 struct ext4_extent *ex,
2456 struct partial_cluster *partial,
2457 ext4_lblk_t from, ext4_lblk_t to)
2458 {
2459 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2460 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2461 ext4_fsblk_t last_pblk, pblk;
2462 ext4_lblk_t num;
2463 int flags;
2464
2465 /* only extent tail removal is allowed */
2466 if (from < le32_to_cpu(ex->ee_block) ||
2467 to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2468 ext4_error(sbi->s_sb,
2469 "strange request: removal(2) %u-%u from %u:%u",
2470 from, to, le32_to_cpu(ex->ee_block), ee_len);
2471 return 0;
2472 }
2473
2474 #ifdef EXTENTS_STATS
2475 spin_lock(&sbi->s_ext_stats_lock);
2476 sbi->s_ext_blocks += ee_len;
2477 sbi->s_ext_extents++;
2478 if (ee_len < sbi->s_ext_min)
2479 sbi->s_ext_min = ee_len;
2480 if (ee_len > sbi->s_ext_max)
2481 sbi->s_ext_max = ee_len;
2482 if (ext_depth(inode) > sbi->s_depth_max)
2483 sbi->s_depth_max = ext_depth(inode);
2484 spin_unlock(&sbi->s_ext_stats_lock);
2485 #endif
2486
2487 trace_ext4_remove_blocks(inode, ex, from, to, partial);
2488
2489 /*
2490 * if we have a partial cluster, and it's different from the
2491 * cluster of the last block in the extent, we free it
2492 */
2493 last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2494
2495 if (partial->state != initial &&
2496 partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2497 if (partial->state == tofree) {
2498 flags = get_default_free_blocks_flags(inode);
2499 if (ext4_is_pending(inode, partial->lblk))
2500 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2501 ext4_free_blocks(handle, inode, NULL,
2502 EXT4_C2B(sbi, partial->pclu),
2503 sbi->s_cluster_ratio, flags);
2504 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2505 ext4_rereserve_cluster(inode, partial->lblk);
2506 }
2507 partial->state = initial;
2508 }
2509
2510 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2511 pblk = ext4_ext_pblock(ex) + ee_len - num;
2512
2513 /*
2514 * We free the partial cluster at the end of the extent (if any),
2515 * unless the cluster is used by another extent (partial_cluster
2516 * state is nofree). If a partial cluster exists here, it must be
2517 * shared with the last block in the extent.
2518 */
2519 flags = get_default_free_blocks_flags(inode);
2520
2521 /* partial, left end cluster aligned, right end unaligned */
2522 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2523 (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2524 (partial->state != nofree)) {
2525 if (ext4_is_pending(inode, to))
2526 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2527 ext4_free_blocks(handle, inode, NULL,
2528 EXT4_PBLK_CMASK(sbi, last_pblk),
2529 sbi->s_cluster_ratio, flags);
2530 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2531 ext4_rereserve_cluster(inode, to);
2532 partial->state = initial;
2533 flags = get_default_free_blocks_flags(inode);
2534 }
2535
2536 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2537
2538 /*
2539 * For bigalloc file systems, we never free a partial cluster
2540 * at the beginning of the extent. Instead, we check to see if we
2541 * need to free it on a subsequent call to ext4_remove_blocks,
2542 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2543 */
2544 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2545 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2546
2547 /* reset the partial cluster if we've freed past it */
2548 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2549 partial->state = initial;
2550
2551 /*
2552 * If we've freed the entire extent but the beginning is not left
2553 * cluster aligned and is not marked as ineligible for freeing we
2554 * record the partial cluster at the beginning of the extent. It
2555 * wasn't freed by the preceding ext4_free_blocks() call, and we
2556 * need to look farther to the left to determine if it's to be freed
2557 * (not shared with another extent). Else, reset the partial
2558 * cluster - we're either done freeing or the beginning of the
2559 * extent is left cluster aligned.
2560 */
2561 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2562 if (partial->state == initial) {
2563 partial->pclu = EXT4_B2C(sbi, pblk);
2564 partial->lblk = from;
2565 partial->state = tofree;
2566 }
2567 } else {
2568 partial->state = initial;
2569 }
2570
2571 return 0;
2572 }
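
/*
 * Editorial sketch (not kernel code): the cluster-alignment tests the
 * bigalloc logic above leans on, modelled for a power-of-two cluster
 * ratio. toy_coff()/toy_cmask() play the roles of EXT4_LBLK_COFF() and
 * EXT4_LBLK_CMASK() under that assumption.
 *
 *	// ratio = blocks per cluster, assumed to be a power of two
 *	static unsigned int toy_coff(unsigned int lblk, unsigned int ratio)
 *	{
 *		return lblk & (ratio - 1);	// offset within the cluster
 *	}
 *
 *	static unsigned int toy_cmask(unsigned int lblk, unsigned int ratio)
 *	{
 *		return lblk & ~(ratio - 1);	// first block of the cluster
 *	}
 *
 *	// The "right end unaligned" test from above: the removal range
 *	// [from, to] ends mid-cluster and starts at or before the first
 *	// block of to's cluster.
 *	static int tail_cluster_is_partial(unsigned int from, unsigned int to,
 *					   unsigned int ratio)
 *	{
 *		return toy_coff(to, ratio) != ratio - 1 &&
 *		       toy_cmask(to, ratio) >= from;
 *	}
 */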
2573
2574 /*
2575 * ext4_ext_rm_leaf() removes the extents associated with the
2576 * blocks appearing between "start" and "end". Both "start"
2577 * and "end" must appear in the same extent or EIO is returned.
2578 *
2579 * @handle: The journal handle
2580 * @inode: The file's inode
2581 * @path: The path to the leaf
2582 * @partial: The cluster which we'll have to free if all extents
2583 * have been released from it. However, if its state is
2584 * nofree, it's a cluster just to the right of the
2585 * punched region and it must not be freed.
2586 * @start: The first block to remove
2587 * @end: The last block to remove
2588 */
2589 static int
2590 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2591 struct ext4_ext_path *path,
2592 struct partial_cluster *partial,
2593 ext4_lblk_t start, ext4_lblk_t end)
2594 {
2595 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2596 int err = 0, correct_index = 0;
2597 int depth = ext_depth(inode), credits, revoke_credits;
2598 struct ext4_extent_header *eh;
2599 ext4_lblk_t a, b;
2600 unsigned num;
2601 ext4_lblk_t ex_ee_block;
2602 unsigned short ex_ee_len;
2603 unsigned unwritten = 0;
2604 struct ext4_extent *ex;
2605 ext4_fsblk_t pblk;
2606
2607 /* the header must be checked already in ext4_ext_remove_space() */
2608 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2609 if (!path[depth].p_hdr)
2610 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2611 eh = path[depth].p_hdr;
2612 if (unlikely(path[depth].p_hdr == NULL)) {
2613 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2614 return -EFSCORRUPTED;
2615 }
2616 /* find where to start removing */
2617 ex = path[depth].p_ext;
2618 if (!ex)
2619 ex = EXT_LAST_EXTENT(eh);
2620
2621 ex_ee_block = le32_to_cpu(ex->ee_block);
2622 ex_ee_len = ext4_ext_get_actual_len(ex);
2623
2624 trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2625
2626 while (ex >= EXT_FIRST_EXTENT(eh) &&
2627 ex_ee_block + ex_ee_len > start) {
2628
2629 if (ext4_ext_is_unwritten(ex))
2630 unwritten = 1;
2631 else
2632 unwritten = 0;
2633
2634 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2635 unwritten, ex_ee_len);
2636 path[depth].p_ext = ex;
2637
2638 a = ex_ee_block > start ? ex_ee_block : start;
2639 b = ex_ee_block+ex_ee_len - 1 < end ?
2640 ex_ee_block+ex_ee_len - 1 : end;
2641
2642 ext_debug(inode, " border %u:%u\n", a, b);
2643
2644 /* If this extent is beyond the end of the hole, skip it */
2645 if (end < ex_ee_block) {
2646 /*
2647 * We're going to skip this extent and move to another,
2648 * so note that its first cluster is in use to avoid
2649 * freeing it when removing blocks. Eventually, the
2650 * right edge of the truncated/punched region will
2651 * be just to the left.
2652 */
2653 if (sbi->s_cluster_ratio > 1) {
2654 pblk = ext4_ext_pblock(ex);
2655 partial->pclu = EXT4_B2C(sbi, pblk);
2656 partial->state = nofree;
2657 }
2658 ex--;
2659 ex_ee_block = le32_to_cpu(ex->ee_block);
2660 ex_ee_len = ext4_ext_get_actual_len(ex);
2661 continue;
2662 } else if (b != ex_ee_block + ex_ee_len - 1) {
2663 EXT4_ERROR_INODE(inode,
2664 "can not handle truncate %u:%u "
2665 "on extent %u:%u",
2666 start, end, ex_ee_block,
2667 ex_ee_block + ex_ee_len - 1);
2668 err = -EFSCORRUPTED;
2669 goto out;
2670 } else if (a != ex_ee_block) {
2671 /* remove tail of the extent */
2672 num = a - ex_ee_block;
2673 } else {
2674 /* remove whole extent: excellent! */
2675 num = 0;
2676 }
2677 /*
2678 * 3 for leaf, sb, and inode plus 2 (bmap and group
2679 * descriptor) for each block group; assume two block
2680 * groups plus ex_ee_len/blocks_per_block_group for
2681 * the worst case
2682 */
2683 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2684 if (ex == EXT_FIRST_EXTENT(eh)) {
2685 correct_index = 1;
2686 credits += (ext_depth(inode)) + 1;
2687 }
2688 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2689 /*
2690 * We may end up freeing some index blocks and data from the
2691 * punched range. Note that partial clusters are accounted for
2692 * by ext4_free_data_revoke_credits().
2693 */
2694 revoke_credits =
2695 ext4_free_metadata_revoke_credits(inode->i_sb,
2696 ext_depth(inode)) +
2697 ext4_free_data_revoke_credits(inode, b - a + 1);
2698
2699 err = ext4_datasem_ensure_credits(handle, inode, credits,
2700 credits, revoke_credits);
2701 if (err) {
2702 if (err > 0)
2703 err = -EAGAIN;
2704 goto out;
2705 }
2706
2707 err = ext4_ext_get_access(handle, inode, path + depth);
2708 if (err)
2709 goto out;
2710
2711 err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2712 if (err)
2713 goto out;
2714
2715 if (num == 0)
2716 /* this extent is removed; mark slot entirely unused */
2717 ext4_ext_store_pblock(ex, 0);
2718
2719 ex->ee_len = cpu_to_le16(num);
2720 /*
2721 * Do not mark unwritten if all the blocks in the
2722 * extent have been removed.
2723 */
2724 if (unwritten && num)
2725 ext4_ext_mark_unwritten(ex);
2726 /*
2727 * If the extent was completely released,
2728 * we need to remove it from the leaf
2729 */
2730 if (num == 0) {
2731 if (end != EXT_MAX_BLOCKS - 1) {
2732 /*
2733 * For hole punching, we need to scoot all the
2734 * extents up when an extent is removed so that
2735 * we don't have blank extents in the middle.
2736 */
2737 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2738 sizeof(struct ext4_extent));
2739
2740 /* Now get rid of the one at the end */
2741 memset(EXT_LAST_EXTENT(eh), 0,
2742 sizeof(struct ext4_extent));
2743 }
2744 le16_add_cpu(&eh->eh_entries, -1);
2745 }
2746
2747 err = ext4_ext_dirty(handle, inode, path + depth);
2748 if (err)
2749 goto out;
2750
2751 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2752 ext4_ext_pblock(ex));
2753 ex--;
2754 ex_ee_block = le32_to_cpu(ex->ee_block);
2755 ex_ee_len = ext4_ext_get_actual_len(ex);
2756 }
2757
2758 if (correct_index && eh->eh_entries)
2759 err = ext4_ext_correct_indexes(handle, inode, path);
2760
2761 /*
2762 * If there's a partial cluster and at least one extent remains in
2763 * the leaf, free the partial cluster if it isn't shared with the
2764 * current extent. If it is shared with the current extent
2765 * we reset the partial cluster because we've reached the start of the
2766 * truncated/punched region and we're done removing blocks.
2767 */
2768 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2769 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2770 if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2771 int flags = get_default_free_blocks_flags(inode);
2772
2773 if (ext4_is_pending(inode, partial->lblk))
2774 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2775 ext4_free_blocks(handle, inode, NULL,
2776 EXT4_C2B(sbi, partial->pclu),
2777 sbi->s_cluster_ratio, flags);
2778 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2779 ext4_rereserve_cluster(inode, partial->lblk);
2780 }
2781 partial->state = initial;
2782 }
2783
2784 /* if this leaf is free, then we should
2785 * remove it from the index block above */
2786 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2787 err = ext4_ext_rm_idx(handle, inode, path, depth);
2788
2789 out:
2790 return err;
2791 }
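
/*
 * Editorial sketch (not kernel code): the per-extent clamping done in the
 * loop above - intersect the extent with [start, end] and compute the
 * surviving head length when only the tail is removed. The toy type and
 * names are assumptions.
 *
 *	struct toy_extent { unsigned int lblk, len; };
 *
 *	// Returns the new length of ex after removing [start, end] from
 *	// its tail, or -1 if the range stops short of the extent's last
 *	// block (only tail removal is handled, as in ext4_ext_rm_leaf()).
 *	static int clamp_tail_removal(const struct toy_extent *ex,
 *				      unsigned int start, unsigned int end)
 *	{
 *		unsigned int last = ex->lblk + ex->len - 1;
 *		unsigned int a = start > ex->lblk ? start : ex->lblk;
 *		unsigned int b = end < last ? end : last;
 *
 *		if (b != last)
 *			return -1;	// would punch a hole mid-extent
 *		return a - ex->lblk;	// 0 means the whole extent goes
 *	}
 */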
2792
2793 /*
2794 * ext4_ext_more_to_rm:
2795 * returns 1 if current index has to be freed (even partial)
2796 */
2797 static int
2798 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2799 {
2800 BUG_ON(path->p_idx == NULL);
2801
2802 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2803 return 0;
2804
2805 /*
2806 * if truncation at a deeper level happened, it wasn't partial,
2807 * so we have to consider the current index for truncation
2808 */
2809 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2810 return 0;
2811 return 1;
2812 }
2813
2814 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2815 ext4_lblk_t end)
2816 {
2817 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2818 int depth = ext_depth(inode);
2819 struct ext4_ext_path *path = NULL;
2820 struct partial_cluster partial;
2821 handle_t *handle;
2822 int i = 0, err = 0;
2823
2824 partial.pclu = 0;
2825 partial.lblk = 0;
2826 partial.state = initial;
2827
2828 ext_debug(inode, "truncate since %u to %u\n", start, end);
2829
2830 /* probably first extent we're gonna free will be last in block */
2831 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2832 depth + 1,
2833 ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2834 if (IS_ERR(handle))
2835 return PTR_ERR(handle);
2836
2837 again:
2838 trace_ext4_ext_remove_space(inode, start, end, depth);
2839
2840 /*
2841 * Check if we are removing extents inside the extent tree. If that
2842 * is the case, we are going to punch a hole inside the extent tree
2843 * so we have to check whether we need to split the extent covering
2844 * the last block to remove so we can easily remove the part of it
2845 * in ext4_ext_rm_leaf().
2846 */
2847 if (end < EXT_MAX_BLOCKS - 1) {
2848 struct ext4_extent *ex;
2849 ext4_lblk_t ee_block, ex_end, lblk;
2850 ext4_fsblk_t pblk;
2851
2852 /* find extent for or closest extent to this block */
2853 path = ext4_find_extent(inode, end, NULL,
2854 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2855 if (IS_ERR(path)) {
2856 ext4_journal_stop(handle);
2857 return PTR_ERR(path);
2858 }
2859 depth = ext_depth(inode);
2860 /* A leaf may not exist only if the inode has no blocks at all */
2861 ex = path[depth].p_ext;
2862 if (!ex) {
2863 if (depth) {
2864 EXT4_ERROR_INODE(inode,
2865 "path[%d].p_hdr == NULL",
2866 depth);
2867 err = -EFSCORRUPTED;
2868 }
2869 goto out;
2870 }
2871
2872 ee_block = le32_to_cpu(ex->ee_block);
2873 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2874
2875 /*
2876 * See if the last block is inside the extent, if so split
2877 * the extent at 'end' block so we can easily remove the
2878 * tail of the first part of the split extent in
2879 * ext4_ext_rm_leaf().
2880 */
2881 if (end >= ee_block && end < ex_end) {
2882
2883 /*
2884 * If we're going to split the extent, note that
2885 * the cluster containing the block after 'end' is
2886 * in use to avoid freeing it when removing blocks.
2887 */
2888 if (sbi->s_cluster_ratio > 1) {
2889 pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2890 partial.pclu = EXT4_B2C(sbi, pblk);
2891 partial.state = nofree;
2892 }
2893
2894 /*
2895 * Split the extent in two so that 'end' is the last
2896 * block in the first new extent. Also we should not
2897 * fail removing space due to ENOSPC so try to use
2898 * reserved block if that happens.
2899 */
2900 err = ext4_force_split_extent_at(handle, inode, &path,
2901 end + 1, 1);
2902 if (err < 0)
2903 goto out;
2904
2905 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2906 partial.state == initial) {
2907 /*
2908 * If we're punching, there's an extent to the right.
2909 * If the partial cluster hasn't been set, set it to
2910 * that extent's first cluster and its state to nofree
2911 * so it won't be freed should it contain blocks to be
2912 * removed. If it's already set (tofree/nofree), we're
2913 * retrying and keep the original partial cluster info
2914 * so a cluster marked tofree as a result of earlier
2915 * extent removal is not lost.
2916 */
2917 lblk = ex_end + 1;
2918 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2919 NULL);
2920 if (err < 0)
2921 goto out;
2922 if (pblk) {
2923 partial.pclu = EXT4_B2C(sbi, pblk);
2924 partial.state = nofree;
2925 }
2926 }
2927 }
2928 /*
2929 * We start scanning from right side, freeing all the blocks
2930 * after i_size and walking into the tree depth-wise.
2931 */
2932 depth = ext_depth(inode);
2933 if (path) {
2934 int k = i = depth;
2935 while (--k > 0)
2936 path[k].p_block =
2937 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2938 } else {
2939 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2940 GFP_NOFS | __GFP_NOFAIL);
2941 if (path == NULL) {
2942 ext4_journal_stop(handle);
2943 return -ENOMEM;
2944 }
2945 path[0].p_maxdepth = path[0].p_depth = depth;
2946 path[0].p_hdr = ext_inode_hdr(inode);
2947 i = 0;
2948
2949 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2950 err = -EFSCORRUPTED;
2951 goto out;
2952 }
2953 }
2954 err = 0;
2955
2956 while (i >= 0 && err == 0) {
2957 if (i == depth) {
2958 /* this is leaf block */
2959 err = ext4_ext_rm_leaf(handle, inode, path,
2960 &partial, start, end);
2961 /* root level has p_bh == NULL, brelse() eats this */
2962 brelse(path[i].p_bh);
2963 path[i].p_bh = NULL;
2964 i--;
2965 continue;
2966 }
2967
2968 /* this is index block */
2969 if (!path[i].p_hdr) {
2970 ext_debug(inode, "initialize header\n");
2971 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2972 }
2973
2974 if (!path[i].p_idx) {
2975 /* this level hasn't been touched yet */
2976 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2977 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2978 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2979 path[i].p_hdr,
2980 le16_to_cpu(path[i].p_hdr->eh_entries));
2981 } else {
2982 /* we were already here, see at next index */
2983 path[i].p_idx--;
2984 }
2985
2986 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2987 i, EXT_FIRST_INDEX(path[i].p_hdr),
2988 path[i].p_idx);
2989 if (ext4_ext_more_to_rm(path + i)) {
2990 struct buffer_head *bh;
2991 /* go to the next level */
2992 ext_debug(inode, "move to level %d (block %llu)\n",
2993 i + 1, ext4_idx_pblock(path[i].p_idx));
2994 memset(path + i + 1, 0, sizeof(*path));
2995 bh = read_extent_tree_block(inode, path[i].p_idx,
2996 depth - i - 1,
2997 EXT4_EX_NOCACHE);
2998 if (IS_ERR(bh)) {
2999 /* should we reset i_size? */
3000 err = PTR_ERR(bh);
3001 break;
3002 }
3003 /* Yield here to deal with large extent trees.
3004 * Should be a no-op if we did IO above. */
3005 cond_resched();
3006 if (WARN_ON(i + 1 > depth)) {
3007 err = -EFSCORRUPTED;
3008 break;
3009 }
3010 path[i + 1].p_bh = bh;
3011
3012 /* save actual number of indexes since this
3013 * number is changed at the next iteration */
3014 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3015 i++;
3016 } else {
3017 /* we finished processing this index, go up */
3018 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3019 /* index is empty, remove it;
3020 * the handle must already be prepared by the
3021 * leaf removal above */
3022 err = ext4_ext_rm_idx(handle, inode, path, i);
3023 }
3024 /* root level has p_bh == NULL, brelse() eats this */
3025 brelse(path[i].p_bh);
3026 path[i].p_bh = NULL;
3027 i--;
3028 ext_debug(inode, "return to level %d\n", i);
3029 }
3030 }
3031
3032 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
3033 path->p_hdr->eh_entries);
3034
3035 /*
3036 * if there's a partial cluster and we have removed the first extent
3037 * in the file, then we also free the partial cluster, if any
3038 */
3039 if (partial.state == tofree && err == 0) {
3040 int flags = get_default_free_blocks_flags(inode);
3041
3042 if (ext4_is_pending(inode, partial.lblk))
3043 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3044 ext4_free_blocks(handle, inode, NULL,
3045 EXT4_C2B(sbi, partial.pclu),
3046 sbi->s_cluster_ratio, flags);
3047 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3048 ext4_rereserve_cluster(inode, partial.lblk);
3049 partial.state = initial;
3050 }
3051
3052 /* TODO: flexible tree reduction should be here */
3053 if (path->p_hdr->eh_entries == 0) {
3054 /*
3055 * truncate to zero freed all the tree,
3056 * so we need to correct eh_depth
3057 */
3058 err = ext4_ext_get_access(handle, inode, path);
3059 if (err == 0) {
3060 ext_inode_hdr(inode)->eh_depth = 0;
3061 ext_inode_hdr(inode)->eh_max =
3062 cpu_to_le16(ext4_ext_space_root(inode, 0));
3063 err = ext4_ext_dirty(handle, inode, path);
3064 }
3065 }
3066 out:
3067 ext4_free_ext_path(path);
3068 path = NULL;
3069 if (err == -EAGAIN)
3070 goto again;
3071 ext4_journal_stop(handle);
3072
3073 return err;
3074 }
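
/*
 * Editorial sketch (not kernel code): the traversal shape of
 * ext4_ext_remove_space() - an iterative, right-to-left, depth-first walk
 * driven by an explicit per-level cursor, with "i" as the current level.
 * The toy callbacks are assumptions; a real walker would key the child
 * count to the block just read, as the code above keys p_block to the
 * header it loads.
 *
 *	#define TOY_MAX_DEPTH 6
 *
 *	static void rm_leaf(int level) { (void)level; }	// cf. ext4_ext_rm_leaf()
 *	static int  nr_children(int level) { (void)level; return 4; }
 *
 *	static void remove_space(int depth)
 *	{
 *		int cursor[TOY_MAX_DEPTH + 1];
 *		int i = 0;
 *
 *		cursor[0] = nr_children(0) - 1;		// rightmost child first
 *		while (i >= 0) {
 *			if (i == depth) {	// leaf level: remove, go up
 *				rm_leaf(i);
 *				i--;
 *				continue;
 *			}
 *			if (cursor[i] < 0) {	// index exhausted: go up
 *				i--;
 *				continue;
 *			}
 *			cursor[i]--;		// consume one child, descend
 *			i++;
 *			cursor[i] = nr_children(i) - 1;
 *		}
 *	}
 */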
3075
3076 /*
3077 * called at mount time
3078 */
3079 void ext4_ext_init(struct super_block *sb)
3080 {
3081 /*
3082 * possible initialization would be here
3083 */
3084
3085 if (ext4_has_feature_extents(sb)) {
3086 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3087 printk(KERN_INFO "EXT4-fs: file extents enabled"
3088 #ifdef AGGRESSIVE_TEST
3089 ", aggressive tests"
3090 #endif
3091 #ifdef CHECK_BINSEARCH
3092 ", check binsearch"
3093 #endif
3094 #ifdef EXTENTS_STATS
3095 ", stats"
3096 #endif
3097 "\n");
3098 #endif
3099 #ifdef EXTENTS_STATS
3100 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3101 EXT4_SB(sb)->s_ext_min = 1 << 30;
3102 EXT4_SB(sb)->s_ext_max = 0;
3103 #endif
3104 }
3105 }
3106
3107 /*
3108 * called at umount time
3109 */
3110 void ext4_ext_release(struct super_block *sb)
3111 {
3112 if (!ext4_has_feature_extents(sb))
3113 return;
3114
3115 #ifdef EXTENTS_STATS
3116 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3117 struct ext4_sb_info *sbi = EXT4_SB(sb);
3118 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3119 sbi->s_ext_blocks, sbi->s_ext_extents,
3120 sbi->s_ext_blocks / sbi->s_ext_extents);
3121 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3122 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3123 }
3124 #endif
3125 }
3126
3127 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3128 {
3129 ext4_lblk_t ee_block;
3130 ext4_fsblk_t ee_pblock;
3131 unsigned int ee_len;
3132
3133 ee_block = le32_to_cpu(ex->ee_block);
3134 ee_len = ext4_ext_get_actual_len(ex);
3135 ee_pblock = ext4_ext_pblock(ex);
3136
3137 if (ee_len == 0)
3138 return 0;
3139
3140 return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3141 EXTENT_STATUS_WRITTEN);
3142 }
3143
3144 /* FIXME!! we need to try to merge to left or right after zero-out */
3145 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3146 {
3147 ext4_fsblk_t ee_pblock;
3148 unsigned int ee_len;
3149
3150 ee_len = ext4_ext_get_actual_len(ex);
3151 ee_pblock = ext4_ext_pblock(ex);
3152 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3153 ee_len);
3154 }
3155
3156 /*
3157 * ext4_split_extent_at() splits an extent at given block.
3158 *
3159 * @handle: the journal handle
3160 * @inode: the file inode
3161 * @path: the path to the extent
3162 * @split: the logical block where the extent is split.
3163 * @split_flag: indicates whether the extent can be zeroed out if the split
3164 * fails, and the states (initialized or unwritten) of the new extents.
3165 * @flags: flags used to insert the new extent into the extent tree.
3166 *
3167 *
3168 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
3169 * states of which are determined by @split_flag.
3170 *
3171 * There are two cases:
3172 * a> the extent is split into two extents.
3173 * b> no split is needed, and the extent is just marked.
3174 *
3175 * return 0 on success.
3176 */
3177 static int ext4_split_extent_at(handle_t *handle,
3178 struct inode *inode,
3179 struct ext4_ext_path **ppath,
3180 ext4_lblk_t split,
3181 int split_flag,
3182 int flags)
3183 {
3184 struct ext4_ext_path *path = *ppath;
3185 ext4_fsblk_t newblock;
3186 ext4_lblk_t ee_block;
3187 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3188 struct ext4_extent *ex2 = NULL;
3189 unsigned int ee_len, depth;
3190 int err = 0;
3191
3192 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3193 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3194
3195 ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3196
3197 ext4_ext_show_leaf(inode, path);
3198
3199 depth = ext_depth(inode);
3200 ex = path[depth].p_ext;
3201 ee_block = le32_to_cpu(ex->ee_block);
3202 ee_len = ext4_ext_get_actual_len(ex);
3203 newblock = split - ee_block + ext4_ext_pblock(ex);
3204
3205 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3206 BUG_ON(!ext4_ext_is_unwritten(ex) &&
3207 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3208 EXT4_EXT_MARK_UNWRIT1 |
3209 EXT4_EXT_MARK_UNWRIT2));
3210
3211 err = ext4_ext_get_access(handle, inode, path + depth);
3212 if (err)
3213 goto out;
3214
3215 if (split == ee_block) {
3216 /*
3217 * case b: block @split is the block that the extent begins with,
3218 * so we just change the state of the extent, and splitting
3219 * is not needed.
3220 */
3221 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3222 ext4_ext_mark_unwritten(ex);
3223 else
3224 ext4_ext_mark_initialized(ex);
3225
3226 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3227 ext4_ext_try_to_merge(handle, inode, path, ex);
3228
3229 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3230 goto out;
3231 }
3232
3233 /* case a */
3234 memcpy(&orig_ex, ex, sizeof(orig_ex));
3235 ex->ee_len = cpu_to_le16(split - ee_block);
3236 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3237 ext4_ext_mark_unwritten(ex);
3238
3239 /*
3240 * the path may lead to a new leaf, not to the original leaf
3241 * any more, after ext4_ext_insert_extent() returns.
3242 */
3243 err = ext4_ext_dirty(handle, inode, path + depth);
3244 if (err)
3245 goto fix_extent_len;
3246
3247 ex2 = &newex;
3248 ex2->ee_block = cpu_to_le32(split);
3249 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3250 ext4_ext_store_pblock(ex2, newblock);
3251 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3252 ext4_ext_mark_unwritten(ex2);
3253
3254 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3255 if (err != -ENOSPC && err != -EDQUOT)
3256 goto out;
3257
3258 if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3259 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3260 if (split_flag & EXT4_EXT_DATA_VALID1) {
3261 err = ext4_ext_zeroout(inode, ex2);
3262 zero_ex.ee_block = ex2->ee_block;
3263 zero_ex.ee_len = cpu_to_le16(
3264 ext4_ext_get_actual_len(ex2));
3265 ext4_ext_store_pblock(&zero_ex,
3266 ext4_ext_pblock(ex2));
3267 } else {
3268 err = ext4_ext_zeroout(inode, ex);
3269 zero_ex.ee_block = ex->ee_block;
3270 zero_ex.ee_len = cpu_to_le16(
3271 ext4_ext_get_actual_len(ex));
3272 ext4_ext_store_pblock(&zero_ex,
3273 ext4_ext_pblock(ex));
3274 }
3275 } else {
3276 err = ext4_ext_zeroout(inode, &orig_ex);
3277 zero_ex.ee_block = orig_ex.ee_block;
3278 zero_ex.ee_len = cpu_to_le16(
3279 ext4_ext_get_actual_len(&orig_ex));
3280 ext4_ext_store_pblock(&zero_ex,
3281 ext4_ext_pblock(&orig_ex));
3282 }
3283
3284 if (!err) {
3285 /* update the extent length and mark as initialized */
3286 ex->ee_len = cpu_to_le16(ee_len);
3287 ext4_ext_try_to_merge(handle, inode, path, ex);
3288 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3289 if (!err)
3290 /* update extent status tree */
3291 err = ext4_zeroout_es(inode, &zero_ex);
3292 /* If we failed at this point, we don't know exactly what
3293 * state the extent tree is in, so don't try to fix the
3294 * length of the original extent as it may do even more
3295 * damage.
3296 */
3297 goto out;
3298 }
3299 }
3300
3301 fix_extent_len:
3302 ex->ee_len = orig_ex.ee_len;
3303 /*
3304 * Ignore ext4_ext_dirty return value since we are already in error path
3305 * and err is a non-zero error code.
3306 */
3307 ext4_ext_dirty(handle, inode, path + path->p_depth);
3308 return err;
3309 out:
3310 ext4_ext_show_leaf(inode, path);
3311 return err;
3312 }
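
/*
 * Editorial sketch (not kernel code): the arithmetic of case a> above -
 * the original extent keeps [lblk, split) and a new extent carries
 * [split, lblk + len), staying physically contiguous, exactly as newblock
 * is derived from "split - ee_block" above. The toy type is an assumption.
 *
 *	struct toy_extent { unsigned int lblk, len; unsigned long pblk; };
 *
 *	// Caller guarantees ex->lblk < split < ex->lblk + ex->len.
 *	static struct toy_extent split_at(struct toy_extent *ex,
 *					  unsigned int split)
 *	{
 *		struct toy_extent right = {
 *			.lblk = split,
 *			.len  = ex->lblk + ex->len - split,
 *			.pblk = ex->pblk + (split - ex->lblk),
 *		};
 *
 *		ex->len = split - ex->lblk;	// left half keeps the head
 *		return right;
 *	}
 */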
3313
3314 /*
3315 * ext4_split_extent() splits an extent and marks the extent covered
3316 * by @map as @split_flag indicates.
3317 *
3318 * It may result in splitting the extent into multiple extents (up to three).
3319 * There are three possibilities:
3320 * a> There is no split required
3321 * b> Splits into two extents: the split happens at either end of the extent
3322 * c> Splits into three extents: someone is splitting in the middle of the extent
3323 *
3324 */
3325 static int ext4_split_extent(handle_t *handle,
3326 struct inode *inode,
3327 struct ext4_ext_path **ppath,
3328 struct ext4_map_blocks *map,
3329 int split_flag,
3330 int flags)
3331 {
3332 struct ext4_ext_path *path = *ppath;
3333 ext4_lblk_t ee_block;
3334 struct ext4_extent *ex;
3335 unsigned int ee_len, depth;
3336 int err = 0;
3337 int unwritten;
3338 int split_flag1, flags1;
3339 int allocated = map->m_len;
3340
3341 depth = ext_depth(inode);
3342 ex = path[depth].p_ext;
3343 ee_block = le32_to_cpu(ex->ee_block);
3344 ee_len = ext4_ext_get_actual_len(ex);
3345 unwritten = ext4_ext_is_unwritten(ex);
3346
3347 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3348 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3349 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3350 if (unwritten)
3351 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3352 EXT4_EXT_MARK_UNWRIT2;
3353 if (split_flag & EXT4_EXT_DATA_VALID2)
3354 split_flag1 |= EXT4_EXT_DATA_VALID1;
3355 err = ext4_split_extent_at(handle, inode, ppath,
3356 map->m_lblk + map->m_len, split_flag1, flags1);
3357 if (err)
3358 goto out;
3359 } else {
3360 allocated = ee_len - (map->m_lblk - ee_block);
3361 }
3362 /*
3363 * Updating the path is required because the previous ext4_split_extent_at()
3364 * may result in a split of the original leaf or an extent zeroout.
3365 */
3366 path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
3367 if (IS_ERR(path))
3368 return PTR_ERR(path);
3369 depth = ext_depth(inode);
3370 ex = path[depth].p_ext;
3371 if (!ex) {
3372 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3373 (unsigned long) map->m_lblk);
3374 return -EFSCORRUPTED;
3375 }
3376 unwritten = ext4_ext_is_unwritten(ex);
3377
3378 if (map->m_lblk >= ee_block) {
3379 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3380 if (unwritten) {
3381 split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3382 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3383 EXT4_EXT_MARK_UNWRIT2);
3384 }
3385 err = ext4_split_extent_at(handle, inode, ppath,
3386 map->m_lblk, split_flag1, flags);
3387 if (err)
3388 goto out;
3389 }
3390
3391 ext4_ext_show_leaf(inode, path);
3392 out:
3393 return err ? err : allocated;
3394 }
3395
3396 /*
3397 * This function is called by ext4_ext_map_blocks() if someone tries to write
3398 * to an unwritten extent. It may result in splitting the unwritten
3399 * extent into multiple extents (up to three - one initialized and two
3400 * unwritten).
3401 * There are three possibilities:
3402 * a> There is no split required: Entire extent should be initialized
3403 * b> Splits into two extents: write is happening at either end of the extent
3404 * c> Splits into three extents: someone is writing in the middle of the extent
3405 *
3406 * Pre-conditions:
3407 * - The extent pointed to by 'path' is unwritten.
3408 * - The extent pointed to by 'path' contains a superset
3409 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3410 *
3411 * Post-conditions on success:
3412 * - the returned value is the number of blocks beyond map->m_lblk
3413 * that are allocated and initialized.
3414 * It is guaranteed to be >= map->m_len.
3415 */
3416 static int ext4_ext_convert_to_initialized(handle_t *handle,
3417 struct inode *inode,
3418 struct ext4_map_blocks *map,
3419 struct ext4_ext_path **ppath,
3420 int flags)
3421 {
3422 struct ext4_ext_path *path = *ppath;
3423 struct ext4_sb_info *sbi;
3424 struct ext4_extent_header *eh;
3425 struct ext4_map_blocks split_map;
3426 struct ext4_extent zero_ex1, zero_ex2;
3427 struct ext4_extent *ex, *abut_ex;
3428 ext4_lblk_t ee_block, eof_block;
3429 unsigned int ee_len, depth, map_len = map->m_len;
3430 int allocated = 0, max_zeroout = 0;
3431 int err = 0;
3432 int split_flag = EXT4_EXT_DATA_VALID2;
3433
3434 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3435 (unsigned long long)map->m_lblk, map_len);
3436
3437 sbi = EXT4_SB(inode->i_sb);
3438 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3439 >> inode->i_sb->s_blocksize_bits;
3440 if (eof_block < map->m_lblk + map_len)
3441 eof_block = map->m_lblk + map_len;
3442
3443 depth = ext_depth(inode);
3444 eh = path[depth].p_hdr;
3445 ex = path[depth].p_ext;
3446 ee_block = le32_to_cpu(ex->ee_block);
3447 ee_len = ext4_ext_get_actual_len(ex);
3448 zero_ex1.ee_len = 0;
3449 zero_ex2.ee_len = 0;
3450
3451 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3452
3453 /* Pre-conditions */
3454 BUG_ON(!ext4_ext_is_unwritten(ex));
3455 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3456
3457 /*
3458 * Attempt to transfer newly initialized blocks from the currently
3459 * unwritten extent to its neighbor. This is much cheaper
3460 * than an insertion followed by a merge as those involve costly
3461 * memmove() calls. Transferring to the left is the common case in
3462 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3463 * followed by append writes.
3464 *
3465 * Limitations of the current logic:
3466 * - L1: we do not deal with writes covering the whole extent.
3467 * This would require removing the extent if the transfer
3468 * is possible.
3469 * - L2: we only attempt to merge with an extent stored in the
3470 * same extent tree node.
3471 */
3472 if ((map->m_lblk == ee_block) &&
3473 /* See if we can merge left */
3474 (map_len < ee_len) && /*L1*/
3475 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
3476 ext4_lblk_t prev_lblk;
3477 ext4_fsblk_t prev_pblk, ee_pblk;
3478 unsigned int prev_len;
3479
3480 abut_ex = ex - 1;
3481 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3482 prev_len = ext4_ext_get_actual_len(abut_ex);
3483 prev_pblk = ext4_ext_pblock(abut_ex);
3484 ee_pblk = ext4_ext_pblock(ex);
3485
3486 /*
3487 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3488 * upon those conditions:
3489 * - C1: abut_ex is initialized,
3490 * - C2: abut_ex is logically abutting ex,
3491 * - C3: abut_ex is physically abutting ex,
3492 * - C4: abut_ex can receive the additional blocks without
3493 * overflowing the (initialized) length limit.
3494 */
3495 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
3496 ((prev_lblk + prev_len) == ee_block) && /*C2*/
3497 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3498 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3499 err = ext4_ext_get_access(handle, inode, path + depth);
3500 if (err)
3501 goto out;
3502
3503 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3504 map, ex, abut_ex);
3505
3506 /* Shift the start of ex by 'map_len' blocks */
3507 ex->ee_block = cpu_to_le32(ee_block + map_len);
3508 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3509 ex->ee_len = cpu_to_le16(ee_len - map_len);
3510 ext4_ext_mark_unwritten(ex); /* Restore the flag */
3511
3512 /* Extend abut_ex by 'map_len' blocks */
3513 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3514
3515 /* Result: number of initialized blocks past m_lblk */
3516 allocated = map_len;
3517 }
3518 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3519 (map_len < ee_len) && /*L1*/
3520 ex < EXT_LAST_EXTENT(eh)) { /*L2*/
3521 /* See if we can merge right */
3522 ext4_lblk_t next_lblk;
3523 ext4_fsblk_t next_pblk, ee_pblk;
3524 unsigned int next_len;
3525
3526 abut_ex = ex + 1;
3527 next_lblk = le32_to_cpu(abut_ex->ee_block);
3528 next_len = ext4_ext_get_actual_len(abut_ex);
3529 next_pblk = ext4_ext_pblock(abut_ex);
3530 ee_pblk = ext4_ext_pblock(ex);
3531
3532 /*
3533 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3534 * upon those conditions:
3535 * - C1: abut_ex is initialized,
3536 * - C2: abut_ex is logically abutting ex,
3537 * - C3: abut_ex is physically abutting ex,
3538 * - C4: abut_ex can receive the additional blocks without
3539 * overflowing the (initialized) length limit.
3540 */
3541 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
3542 ((map->m_lblk + map_len) == next_lblk) && /*C2*/
3543 ((ee_pblk + ee_len) == next_pblk) && /*C3*/
3544 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3545 err = ext4_ext_get_access(handle, inode, path + depth);
3546 if (err)
3547 goto out;
3548
3549 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3550 map, ex, abut_ex);
3551
3552 /* Shift the start of abut_ex by 'map_len' blocks */
3553 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3554 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3555 ex->ee_len = cpu_to_le16(ee_len - map_len);
3556 ext4_ext_mark_unwritten(ex); /* Restore the flag */
3557
3558 /* Extend abut_ex by 'map_len' blocks */
3559 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3560
3561 /* Result: number of initialized blocks past m_lblk */
3562 allocated = map_len;
3563 }
3564 }
3565 if (allocated) {
3566 /* Mark the block containing both extents as dirty */
3567 err = ext4_ext_dirty(handle, inode, path + depth);
3568
3569 /* Update path to point to the right extent */
3570 path[depth].p_ext = abut_ex;
3571 goto out;
3572 } else
3573 allocated = ee_len - (map->m_lblk - ee_block);
3574
3575 WARN_ON(map->m_lblk < ee_block);
3576 /*
3577 * It is safe to convert extent to initialized via explicit
3578 * zeroout only if extent is fully inside i_size or new_size.
3579 */
3580 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3581
3582 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3583 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3584 (inode->i_sb->s_blocksize_bits - 10);
3585
3586 /*
3587 * five cases:
3588 * 1. split the extent into three extents.
3589 * 2. split the extent into two extents, zeroout the head of the first
3590 * extent.
3591 * 3. split the extent into two extents, zeroout the tail of the second
3592 * extent.
3593 * 4. split the extent into two extents without zeroout.
3594 * 5. no splitting needed, just possibly zeroout the head and / or the
3595 * tail of the extent.
3596 */
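/*
 * Worked example (illustrative, not part of the original source): take an
 * unwritten extent covering logical blocks 100-163 and a write mapping
 * blocks 120-129. Without zeroout this is case 1: a three-way split into
 * [100,119] unwritten, [120,129] initialized and [130,163] unwritten.
 * If max_zeroout allows, the short tail may be zeroed out instead (case 3),
 * or both edges may be zeroed so no split is needed at all (case 5).
 */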
3597 split_map.m_lblk = map->m_lblk;
3598 split_map.m_len = map->m_len;
3599
3600 if (max_zeroout && (allocated > split_map.m_len)) {
3601 if (allocated <= max_zeroout) {
3602 /* case 3 or 5 */
3603 zero_ex1.ee_block =
3604 cpu_to_le32(split_map.m_lblk +
3605 split_map.m_len);
3606 zero_ex1.ee_len =
3607 cpu_to_le16(allocated - split_map.m_len);
3608 ext4_ext_store_pblock(&zero_ex1,
3609 ext4_ext_pblock(ex) + split_map.m_lblk +
3610 split_map.m_len - ee_block);
3611 err = ext4_ext_zeroout(inode, &zero_ex1);
3612 if (err)
3613 goto fallback;
3614 split_map.m_len = allocated;
3615 }
3616 if (split_map.m_lblk - ee_block + split_map.m_len <
3617 max_zeroout) {
3618 /* case 2 or 5 */
3619 if (split_map.m_lblk != ee_block) {
3620 zero_ex2.ee_block = ex->ee_block;
3621 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3622 ee_block);
3623 ext4_ext_store_pblock(&zero_ex2,
3624 ext4_ext_pblock(ex));
3625 err = ext4_ext_zeroout(inode, &zero_ex2);
3626 if (err)
3627 goto fallback;
3628 }
3629
3630 split_map.m_len += split_map.m_lblk - ee_block;
3631 split_map.m_lblk = ee_block;
3632 allocated = map->m_len;
3633 }
3634 }
3635
3636 fallback:
3637 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3638 flags);
3639 if (err > 0)
3640 err = 0;
3641 out:
3642 /* If we have gotten a failure, don't zero out status tree */
3643 if (!err) {
3644 err = ext4_zeroout_es(inode, &zero_ex1);
3645 if (!err)
3646 err = ext4_zeroout_es(inode, &zero_ex2);
3647 }
3648 return err ? err : allocated;
3649 }
3650
3651 /*
3652 * This function is called by ext4_ext_map_blocks() from
3653 * ext4_get_blocks_dio_write() when a DIO write targets
3654 * an unwritten extent.
3655 *
3656 * Writing to an unwritten extent may result in splitting the unwritten
3657 * extent into multiple initialized/unwritten extents (up to three).
3658 * There are three possibilities:
3659 * a> No split required: the entire extent stays unwritten
3660 * b> Split into two extents: the write happens at either end of the extent
3661 * c> Split into three extents: someone is writing in the middle of the extent
3662 *
3663 * This works the same way in the case of initialized -> unwritten conversion.
3664 *
3665 * One or more index blocks may be needed if the extent tree grows after
3666 * the unwritten extent is split. To prevent ENOSPC at IO completion
3667 * time, we need to split the unwritten extent before the DIO is
3668 * submitted. The unwritten extent in question will be split into
3669 * (at most) three unwritten extents. After IO completes, the part
3670 * that was filled is converted to initialized by the end_io callback
3671 * via ext4_convert_unwritten_extents().
3672 *
3673 * Returns the size of unwritten extent to be written on success.
3674 */
3675 static int ext4_split_convert_extents(handle_t *handle,
3676 struct inode *inode,
3677 struct ext4_map_blocks *map,
3678 struct ext4_ext_path **ppath,
3679 int flags)
3680 {
3681 struct ext4_ext_path *path = *ppath;
3682 ext4_lblk_t eof_block;
3683 ext4_lblk_t ee_block;
3684 struct ext4_extent *ex;
3685 unsigned int ee_len;
3686 int split_flag = 0, depth;
3687
3688 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3689 (unsigned long long)map->m_lblk, map->m_len);
3690
3691 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3692 >> inode->i_sb->s_blocksize_bits;
3693 if (eof_block < map->m_lblk + map->m_len)
3694 eof_block = map->m_lblk + map->m_len;
3695 /*
3696 * It is safe to convert extent to initialized via explicit
3697 * zeroout only if extent is fully inside i_size or new_size.
3698 */
3699 depth = ext_depth(inode);
3700 ex = path[depth].p_ext;
3701 ee_block = le32_to_cpu(ex->ee_block);
3702 ee_len = ext4_ext_get_actual_len(ex);
3703
3704 /* Convert to unwritten */
3705 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3706 split_flag |= EXT4_EXT_DATA_VALID1;
3707 /* Convert to initialized */
3708 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3709 split_flag |= ee_block + ee_len <= eof_block ?
3710 EXT4_EXT_MAY_ZEROOUT : 0;
3711 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3712 }
3713 flags |= EXT4_GET_BLOCKS_PRE_IO;
3714 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3715 }
3716
3717 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3718 struct inode *inode,
3719 struct ext4_map_blocks *map,
3720 struct ext4_ext_path **ppath)
3721 {
3722 struct ext4_ext_path *path = *ppath;
3723 struct ext4_extent *ex;
3724 ext4_lblk_t ee_block;
3725 unsigned int ee_len;
3726 int depth;
3727 int err = 0;
3728
3729 depth = ext_depth(inode);
3730 ex = path[depth].p_ext;
3731 ee_block = le32_to_cpu(ex->ee_block);
3732 ee_len = ext4_ext_get_actual_len(ex);
3733
3734 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3735 (unsigned long long)ee_block, ee_len);
3736
3737 /* If the extent is larger than requested, it is a clear sign that we
3738 * still have some extent state machine issues left, so a split is
3739 * still required.
3740 * TODO: Once all related issues are fixed, this situation should be
3741 * treated as an error.
3742 */
3743 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3744 #ifdef CONFIG_EXT4_DEBUG
3745 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3746 " len %u; IO logical block %llu, len %u",
3747 inode->i_ino, (unsigned long long)ee_block, ee_len,
3748 (unsigned long long)map->m_lblk, map->m_len);
3749 #endif
3750 err = ext4_split_convert_extents(handle, inode, map, ppath,
3751 EXT4_GET_BLOCKS_CONVERT);
3752 if (err < 0)
3753 return err;
3754 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3755 if (IS_ERR(path))
3756 return PTR_ERR(path);
3757 depth = ext_depth(inode);
3758 ex = path[depth].p_ext;
3759 }
3760
3761 err = ext4_ext_get_access(handle, inode, path + depth);
3762 if (err)
3763 goto out;
3764 /* first mark the extent as initialized */
3765 ext4_ext_mark_initialized(ex);
3766
3767 /* note: ext4_ext_correct_indexes() isn't needed here because
3768 * borders are not changed
3769 */
3770 ext4_ext_try_to_merge(handle, inode, path, ex);
3771
3772 /* Mark modified extent as dirty */
3773 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3774 out:
3775 ext4_ext_show_leaf(inode, path);
3776 return err;
3777 }
3778
3779 static int
3780 convert_initialized_extent(handle_t *handle, struct inode *inode,
3781 struct ext4_map_blocks *map,
3782 struct ext4_ext_path **ppath,
3783 unsigned int *allocated)
3784 {
3785 struct ext4_ext_path *path = *ppath;
3786 struct ext4_extent *ex;
3787 ext4_lblk_t ee_block;
3788 unsigned int ee_len;
3789 int depth;
3790 int err = 0;
3791
3792 /*
3793 * Make sure that the extent is no bigger than we support with
3794 * an unwritten extent.
3795 */
3796 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3797 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3798
3799 depth = ext_depth(inode);
3800 ex = path[depth].p_ext;
3801 ee_block = le32_to_cpu(ex->ee_block);
3802 ee_len = ext4_ext_get_actual_len(ex);
3803
3804 ext_debug(inode, "logical block %llu, max_blocks %u\n",
3805 (unsigned long long)ee_block, ee_len);
3806
3807 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3808 err = ext4_split_convert_extents(handle, inode, map, ppath,
3809 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3810 if (err < 0)
3811 return err;
3812 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3813 if (IS_ERR(path))
3814 return PTR_ERR(path);
3815 depth = ext_depth(inode);
3816 ex = path[depth].p_ext;
3817 if (!ex) {
3818 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3819 (unsigned long) map->m_lblk);
3820 return -EFSCORRUPTED;
3821 }
3822 }
3823
3824 err = ext4_ext_get_access(handle, inode, path + depth);
3825 if (err)
3826 return err;
3827 /* first mark the extent as unwritten */
3828 ext4_ext_mark_unwritten(ex);
3829
3830 /* note: ext4_ext_correct_indexes() isn't needed here because
3831 * borders are not changed
3832 */
3833 ext4_ext_try_to_merge(handle, inode, path, ex);
3834
3835 /* Mark modified extent as dirty */
3836 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3837 if (err)
3838 return err;
3839 ext4_ext_show_leaf(inode, path);
3840
3841 ext4_update_inode_fsync_trans(handle, inode, 1);
3842
3843 map->m_flags |= EXT4_MAP_UNWRITTEN;
3844 if (*allocated > map->m_len)
3845 *allocated = map->m_len;
3846 map->m_len = *allocated;
3847 return 0;
3848 }
3849
3850 static int
3851 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3852 struct ext4_map_blocks *map,
3853 struct ext4_ext_path **ppath, int flags,
3854 unsigned int allocated, ext4_fsblk_t newblock)
3855 {
3856 struct ext4_ext_path __maybe_unused *path = *ppath;
3857 int ret = 0;
3858 int err = 0;
3859
3860 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3861 (unsigned long long)map->m_lblk, map->m_len, flags,
3862 allocated);
3863 ext4_ext_show_leaf(inode, path);
3864
3865 /*
3866 * When writing into unwritten space, we should not fail to
3867 * allocate metadata blocks for the new extent block if needed.
3868 */
3869 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3870
3871 trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3872 allocated, newblock);
3873
3874 /* get_block() before submitting IO, split the extent */
3875 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3876 ret = ext4_split_convert_extents(handle, inode, map, ppath,
3877 flags | EXT4_GET_BLOCKS_CONVERT);
3878 if (ret < 0) {
3879 err = ret;
3880 goto out2;
3881 }
3882 /*
3883 * shouldn't get a 0 return when splitting an extent unless
3884 * m_len is 0 (bug) or extent has been corrupted
3885 */
3886 if (unlikely(ret == 0)) {
3887 EXT4_ERROR_INODE(inode,
3888 "unexpected ret == 0, m_len = %u",
3889 map->m_len);
3890 err = -EFSCORRUPTED;
3891 goto out2;
3892 }
3893 map->m_flags |= EXT4_MAP_UNWRITTEN;
3894 goto out;
3895 }
3896 /* IO end_io complete, convert the filled extent to written */
3897 if (flags & EXT4_GET_BLOCKS_CONVERT) {
3898 err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3899 ppath);
3900 if (err < 0)
3901 goto out2;
3902 ext4_update_inode_fsync_trans(handle, inode, 1);
3903 goto map_out;
3904 }
3905 /* buffered IO cases */
3906 /*
3907 * repeated fallocate creation request:
3908 * we already have an unwritten extent
3909 */
3910 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3911 map->m_flags |= EXT4_MAP_UNWRITTEN;
3912 goto map_out;
3913 }
3914
3915 /* buffered READ or buffered write_begin() lookup */
3916 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3917 /*
3918 * We have blocks reserved already. We
3919 * return allocated blocks so that delalloc
3920 * won't do block reservation for us. But
3921 * the buffer head will be unmapped so that
3922 * a read from the block returns 0s.
3923 */
3924 map->m_flags |= EXT4_MAP_UNWRITTEN;
3925 goto out1;
3926 }
3927
3928 /*
3929 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
3930 * For buffered writes, at writepage time, etc. Convert a
3931 * discovered unwritten extent to written.
3932 */
3933 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3934 if (ret < 0) {
3935 err = ret;
3936 goto out2;
3937 }
3938 ext4_update_inode_fsync_trans(handle, inode, 1);
3939 /*
3940 * shouldn't get a 0 return when converting an unwritten extent
3941 * unless m_len is 0 (bug) or extent has been corrupted
3942 */
3943 if (unlikely(ret == 0)) {
3944 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3945 map->m_len);
3946 err = -EFSCORRUPTED;
3947 goto out2;
3948 }
3949
3950 out:
3951 allocated = ret;
3952 map->m_flags |= EXT4_MAP_NEW;
3953 map_out:
3954 map->m_flags |= EXT4_MAP_MAPPED;
3955 out1:
3956 map->m_pblk = newblock;
3957 if (allocated > map->m_len)
3958 allocated = map->m_len;
3959 map->m_len = allocated;
3960 ext4_ext_show_leaf(inode, path);
3961 out2:
3962 return err ? err : allocated;
3963 }
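/*
 * Summary of the flag dispatch above (illustrative, not part of the
 * original source):
 *
 *	EXT4_GET_BLOCKS_PRE_IO      -> split only, extent stays unwritten
 *	EXT4_GET_BLOCKS_CONVERT     -> end_io path, mark filled part written
 *	EXT4_GET_BLOCKS_UNWRIT_EXT  -> repeated fallocate, report unwritten
 *	no EXT4_GET_BLOCKS_CREATE   -> lookup only, leave buffer unmapped
 *	EXT4_GET_BLOCKS_CREATE      -> buffered write, convert to initialized
 */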
3964
3965 /*
3966 * get_implied_cluster_alloc - check to see if the requested
3967 * allocation (in the map structure) overlaps with a cluster already
3968 * allocated in an extent.
3969 * @sb The filesystem superblock structure
3970 * @map The requested lblk->pblk mapping
3971 * @ex The extent structure which might contain an implied
3972 * cluster allocation
3973 *
3974 * This function is called by ext4_ext_map_blocks() after we failed to
3975 * find blocks that were already in the inode's extent tree. Hence,
3976 * we know that the beginning of the requested region cannot overlap
3977 * the extent from the inode's extent tree. There are three cases we
3978 * want to catch. The first is this case:
3979 *
3980 * |--- cluster # N--|
3981 * |--- extent ---| |---- requested region ---|
3982 * |==========|
3983 *
3984 * The second case that we need to test for is this one:
3985 *
3986 * |--------- cluster # N ----------------|
3987 * |--- requested region --| |------- extent ----|
3988 * |=======================|
3989 *
3990 * The third case is when the requested region lies between two extents
3991 * within the same cluster:
3992 * |------------- cluster # N-------------|
3993 * |----- ex -----| |---- ex_right ----|
3994 * |------ requested region ------|
3995 * |================|
3996 *
3997 * In each of the above cases, we need to set map->m_pblk and
3998 * map->m_len so that they correspond to the extent labelled
3999 * "|====|" from cluster #N, since it is already in use for data in
4000 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
4001 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4002 * as a new "allocated" block region. Otherwise, we will return 0 and
4003 * ext4_ext_map_blocks() will then allocate one or more new clusters
4004 * by calling ext4_mb_new_blocks().
4005 */
4006 static int get_implied_cluster_alloc(struct super_block *sb,
4007 struct ext4_map_blocks *map,
4008 struct ext4_extent *ex,
4009 struct ext4_ext_path *path)
4010 {
4011 struct ext4_sb_info *sbi = EXT4_SB(sb);
4012 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4013 ext4_lblk_t ex_cluster_start, ex_cluster_end;
4014 ext4_lblk_t rr_cluster_start;
4015 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4016 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4017 unsigned short ee_len = ext4_ext_get_actual_len(ex);
4018
4019 /* The extent passed in that we are trying to match */
4020 ex_cluster_start = EXT4_B2C(sbi, ee_block);
4021 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4022
4023 /* The requested region passed into ext4_map_blocks() */
4024 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4025
4026 if ((rr_cluster_start == ex_cluster_end) ||
4027 (rr_cluster_start == ex_cluster_start)) {
4028 if (rr_cluster_start == ex_cluster_end)
4029 ee_start += ee_len - 1;
4030 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4031 map->m_len = min(map->m_len,
4032 (unsigned) sbi->s_cluster_ratio - c_offset);
4033 /*
4034 * Check for and handle this case:
4035 *
4036 * |--------- cluster # N-------------|
4037 * |------- extent ----|
4038 * |--- requested region ---|
4039 * |===========|
4040 */
4041
4042 if (map->m_lblk < ee_block)
4043 map->m_len = min(map->m_len, ee_block - map->m_lblk);
4044
4045 /*
4046 * Check for the case where there is already another allocated
4047 * block to the right of 'ex' but before the end of the cluster.
4048 *
4049 * |------------- cluster # N-------------|
4050 * |----- ex -----| |---- ex_right ----|
4051 * |------ requested region ------|
4052 * |================|
4053 */
4054 if (map->m_lblk > ee_block) {
4055 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4056 map->m_len = min(map->m_len, next - map->m_lblk);
4057 }
4058
4059 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4060 return 1;
4061 }
4062
4063 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4064 return 0;
4065 }
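/*
 * Worked example (illustrative, not part of the original source): on a
 * bigalloc file system with s_cluster_ratio = 16, suppose cluster #4
 * covers logical blocks 64-79 and an extent already maps blocks 64-67 to
 * physical blocks 1024-1027. A request for block 70 lands in the same
 * cluster, so the function returns 1 with map->m_pblk = 1030 (offset 6
 * within the already-allocated physical cluster starting at 1024) and
 * map->m_len clamped to the cluster boundary, so no second cluster
 * allocation is needed.
 */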
4066
4067
4068 /*
4069 * Block allocation/map/preallocation routine for extents based files
4070 *
4071 *
4072 * Needs to be called with
4073 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4074 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4075 *
4076 * return > 0, number of blocks already mapped/allocated
4077 * if create == 0 and these are pre-allocated blocks
4078 * buffer head is unmapped
4079 * otherwise blocks are mapped
4080 *
4081 * return = 0, if plain look up failed (blocks have not been allocated)
4082 * buffer head is unmapped
4083 *
4084 * return < 0, error case.
4085 */
4086 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4087 struct ext4_map_blocks *map, int flags)
4088 {
4089 struct ext4_ext_path *path = NULL;
4090 struct ext4_extent newex, *ex, ex2;
4091 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4092 ext4_fsblk_t newblock = 0, pblk;
4093 int err = 0, depth, ret;
4094 unsigned int allocated = 0, offset = 0;
4095 unsigned int allocated_clusters = 0;
4096 struct ext4_allocation_request ar;
4097 ext4_lblk_t cluster_offset;
4098
4099 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4100 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4101
4102 /* find extent for this block */
4103 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4104 if (IS_ERR(path)) {
4105 err = PTR_ERR(path);
4106 path = NULL;
4107 goto out;
4108 }
4109
4110 depth = ext_depth(inode);
4111
4112 /*
4113 * consistent leaf must not be empty;
4114 * this situation is possible, though, _during_ tree modification;
4115 * this is why assert can't be put in ext4_find_extent()
4116 */
4117 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4118 EXT4_ERROR_INODE(inode, "bad extent address "
4119 "lblock: %lu, depth: %d pblock %lld",
4120 (unsigned long) map->m_lblk, depth,
4121 path[depth].p_block);
4122 err = -EFSCORRUPTED;
4123 goto out;
4124 }
4125
4126 ex = path[depth].p_ext;
4127 if (ex) {
4128 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4129 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4130 unsigned short ee_len;
4131
4132
4133 /*
4134 * unwritten extents are treated as holes, except that
4135 * we split out initialized portions during a write.
4136 */
4137 ee_len = ext4_ext_get_actual_len(ex);
4138
4139 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4140
4141 /* if found extent covers block, simply return it */
4142 if (in_range(map->m_lblk, ee_block, ee_len)) {
4143 newblock = map->m_lblk - ee_block + ee_start;
4144 /* number of remaining blocks in the extent */
4145 allocated = ee_len - (map->m_lblk - ee_block);
4146 ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4147 map->m_lblk, ee_block, ee_len, newblock);
4148
4149 /*
4150 * If the extent is initialized check whether the
4151 * caller wants to convert it to unwritten.
4152 */
4153 if ((!ext4_ext_is_unwritten(ex)) &&
4154 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4155 err = convert_initialized_extent(handle,
4156 inode, map, &path, &allocated);
4157 goto out;
4158 } else if (!ext4_ext_is_unwritten(ex)) {
4159 map->m_flags |= EXT4_MAP_MAPPED;
4160 map->m_pblk = newblock;
4161 if (allocated > map->m_len)
4162 allocated = map->m_len;
4163 map->m_len = allocated;
4164 ext4_ext_show_leaf(inode, path);
4165 goto out;
4166 }
4167
4168 ret = ext4_ext_handle_unwritten_extents(
4169 handle, inode, map, &path, flags,
4170 allocated, newblock);
4171 if (ret < 0)
4172 err = ret;
4173 else
4174 allocated = ret;
4175 goto out;
4176 }
4177 }
4178
4179 /*
4180 * requested block isn't allocated yet;
4181 * we cannot allocate blocks if the create flag is zero
4182 */
4183 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4184 ext4_lblk_t hole_start, hole_len;
4185
4186 hole_start = map->m_lblk;
4187 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
4188 /*
4189 * put just found gap into cache to speed up
4190 * subsequent requests
4191 */
4192 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4193
4194 /* Update hole_len to reflect hole size after map->m_lblk */
4195 if (hole_start != map->m_lblk)
4196 hole_len -= map->m_lblk - hole_start;
4197 map->m_pblk = 0;
4198 map->m_len = min_t(unsigned int, map->m_len, hole_len);
4199
4200 goto out;
4201 }
4202
4203 /*
4204 * Okay, we need to do block allocation.
4205 */
4206 newex.ee_block = cpu_to_le32(map->m_lblk);
4207 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4208
4209 /*
4210 * If we are doing bigalloc, check to see if the extent returned
4211 * by ext4_find_extent() implies a cluster we can use.
4212 */
4213 if (cluster_offset && ex &&
4214 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4215 ar.len = allocated = map->m_len;
4216 newblock = map->m_pblk;
4217 goto got_allocated_blocks;
4218 }
4219
4220 /* find neighbour allocated blocks */
4221 ar.lleft = map->m_lblk;
4222 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4223 if (err)
4224 goto out;
4225 ar.lright = map->m_lblk;
4226 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4227 if (err < 0)
4228 goto out;
4229
4230 /* Check if the extent after searching to the right implies a
4231 * cluster we can use. */
4232 if ((sbi->s_cluster_ratio > 1) && err &&
4233 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4234 ar.len = allocated = map->m_len;
4235 newblock = map->m_pblk;
4236 goto got_allocated_blocks;
4237 }
4238
4239 /*
4240 * See if request is beyond maximum number of blocks we can have in
4241 * a single extent. For an initialized extent this limit is
4242 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4243 * EXT_UNWRITTEN_MAX_LEN.
4244 */
4245 if (map->m_len > EXT_INIT_MAX_LEN &&
4246 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4247 map->m_len = EXT_INIT_MAX_LEN;
4248 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4249 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4250 map->m_len = EXT_UNWRITTEN_MAX_LEN;
4251
4252 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4253 newex.ee_len = cpu_to_le16(map->m_len);
4254 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4255 if (err)
4256 allocated = ext4_ext_get_actual_len(&newex);
4257 else
4258 allocated = map->m_len;
4259
4260 /* allocate new block */
4261 ar.inode = inode;
4262 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4263 ar.logical = map->m_lblk;
4264 /*
4265 * We calculate the offset from the beginning of the cluster
4266 * for the logical block number, since when we allocate a
4267 * physical cluster, the physical block should start at the
4268 * same offset from the beginning of the cluster. This is
4269 * needed so that future calls to get_implied_cluster_alloc()
4270 * work correctly.
4271 */
4272 offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4273 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4274 ar.goal -= offset;
4275 ar.logical -= offset;
4276 if (S_ISREG(inode->i_mode))
4277 ar.flags = EXT4_MB_HINT_DATA;
4278 else
4279 /* disable in-core preallocation for non-regular files */
4280 ar.flags = 0;
4281 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4282 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4283 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4284 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4285 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4286 ar.flags |= EXT4_MB_USE_RESERVED;
4287 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4288 if (!newblock)
4289 goto out;
4290 allocated_clusters = ar.len;
4291 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4292 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4293 ar.goal, newblock, ar.len, allocated);
4294 if (ar.len > allocated)
4295 ar.len = allocated;
4296
4297 got_allocated_blocks:
4298 /* try to insert new extent into found leaf and return */
4299 pblk = newblock + offset;
4300 ext4_ext_store_pblock(&newex, pblk);
4301 newex.ee_len = cpu_to_le16(ar.len);
4302 /* Mark unwritten */
4303 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4304 ext4_ext_mark_unwritten(&newex);
4305 map->m_flags |= EXT4_MAP_UNWRITTEN;
4306 }
4307
4308 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
4309 if (err) {
4310 if (allocated_clusters) {
4311 int fb_flags = 0;
4312
4313 /*
4314 * Free the data blocks we just allocated.
4315 * It's not a good idea to call discard here directly,
4316 * but otherwise we'd need to call it on every free().
4317 */
4318 ext4_discard_preallocations(inode, 0);
4319 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4320 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4321 ext4_free_blocks(handle, inode, NULL, newblock,
4322 EXT4_C2B(sbi, allocated_clusters),
4323 fb_flags);
4324 }
4325 goto out;
4326 }
4327
4328 /*
4329 * Reduce the reserved cluster count to reflect successful deferred
4330 * allocation of delayed allocated clusters or direct allocation of
4331 * clusters discovered to be delayed allocated. Once allocated, a
4332 * cluster is not included in the reserved count.
4333 */
4334 if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
4335 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4336 /*
4337 * When allocating delayed allocated clusters, simply
4338 * reduce the reserved cluster count and claim quota
4339 */
4340 ext4_da_update_reserve_space(inode, allocated_clusters,
4341 1);
4342 } else {
4343 ext4_lblk_t lblk, len;
4344 unsigned int n;
4345
4346 /*
4347 * When allocating non-delayed allocated clusters
4348 * (from fallocate, filemap, DIO, or clusters
4349 * allocated when delalloc has been disabled by
4350 * ext4_nonda_switch), reduce the reserved cluster
4351 * count by the number of allocated clusters that
4352 * have previously been delayed allocated. Quota
4353 * has been claimed by ext4_mb_new_blocks() above,
4354 * so release the quota reservations made for any
4355 * previously delayed allocated clusters.
4356 */
4357 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4358 len = allocated_clusters << sbi->s_cluster_bits;
4359 n = ext4_es_delayed_clu(inode, lblk, len);
4360 if (n > 0)
4361 ext4_da_update_reserve_space(inode, (int) n, 0);
4362 }
4363 }
4364
4365 /*
4366 * Cache the extent and update transaction to commit on fdatasync only
4367 * when it is _not_ an unwritten extent.
4368 */
4369 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4370 ext4_update_inode_fsync_trans(handle, inode, 1);
4371 else
4372 ext4_update_inode_fsync_trans(handle, inode, 0);
4373
4374 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4375 map->m_pblk = pblk;
4376 map->m_len = ar.len;
4377 allocated = map->m_len;
4378 ext4_ext_show_leaf(inode, path);
4379 out:
4380 ext4_free_ext_path(path);
4381
4382 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4383 err ? err : allocated);
4384 return err ? err : allocated;
4385 }
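/*
 * Minimal caller sketch (illustrative only; real callers go through
 * ext4_map_blocks(), and a journal handle must already be started when
 * allocating):
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = lblk,	// first logical block to map
 *		.m_len = len,	// number of blocks wanted
 *	};
 *	// lookup only; pass EXT4_GET_BLOCKS_CREATE to allocate
 *	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *	if (ret > 0)
 *		// ret blocks are mapped at map.m_pblk; inspect map.m_flags
 *	else if (ret == 0)
 *		// hole, and no allocation was requested
 *	else
 *		// error, e.g. -ENOSPC or -EFSCORRUPTED
 */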
4386
4387 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4388 {
4389 struct super_block *sb = inode->i_sb;
4390 ext4_lblk_t last_block;
4391 int err = 0;
4392
4393 /*
4394 * TODO: optimization is possible here.
4395 * Probably we need not scan at all,
4396 * because page truncation is enough.
4397 */
4398
4399 /* we have to know where to truncate from in crash case */
4400 EXT4_I(inode)->i_disksize = inode->i_size;
4401 err = ext4_mark_inode_dirty(handle, inode);
4402 if (err)
4403 return err;
4404
4405 last_block = (inode->i_size + sb->s_blocksize - 1)
4406 >> EXT4_BLOCK_SIZE_BITS(sb);
4407 retry:
4408 err = ext4_es_remove_extent(inode, last_block,
4409 EXT_MAX_BLOCKS - last_block);
4410 if (err == -ENOMEM) {
4411 memalloc_retry_wait(GFP_ATOMIC);
4412 goto retry;
4413 }
4414 if (err)
4415 return err;
4416 retry_remove_space:
4417 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4418 if (err == -ENOMEM) {
4419 memalloc_retry_wait(GFP_ATOMIC);
4420 goto retry_remove_space;
4421 }
4422 return err;
4423 }
4424
4425 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4426 ext4_lblk_t len, loff_t new_size,
4427 int flags)
4428 {
4429 struct inode *inode = file_inode(file);
4430 handle_t *handle;
4431 int ret = 0, ret2 = 0, ret3 = 0;
4432 int retries = 0;
4433 int depth = 0;
4434 struct ext4_map_blocks map;
4435 unsigned int credits;
4436 loff_t epos;
4437
4438 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4439 map.m_lblk = offset;
4440 map.m_len = len;
4441 /*
4442 * Don't normalize the request if it can fit in one extent so
4443 * that it doesn't get unnecessarily split into multiple
4444 * extents.
4445 */
4446 if (len <= EXT_UNWRITTEN_MAX_LEN)
4447 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4448
4449 /*
4450 * credits to insert 1 extent into extent tree
4451 */
4452 credits = ext4_chunk_trans_blocks(inode, len);
4453 depth = ext_depth(inode);
4454
4455 retry:
4456 while (len) {
4457 /*
4458 * Recalculate credits when extent tree depth changes.
4459 */
4460 if (depth != ext_depth(inode)) {
4461 credits = ext4_chunk_trans_blocks(inode, len);
4462 depth = ext_depth(inode);
4463 }
4464
4465 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4466 credits);
4467 if (IS_ERR(handle)) {
4468 ret = PTR_ERR(handle);
4469 break;
4470 }
4471 ret = ext4_map_blocks(handle, inode, &map, flags);
4472 if (ret <= 0) {
4473 ext4_debug("inode #%lu: block %u: len %u: "
4474 "ext4_ext_map_blocks returned %d",
4475 inode->i_ino, map.m_lblk,
4476 map.m_len, ret);
4477 ext4_mark_inode_dirty(handle, inode);
4478 ext4_journal_stop(handle);
4479 break;
4480 }
4481 /*
4482 * allow a full retry cycle for any remaining allocations
4483 */
4484 retries = 0;
4485 map.m_lblk += ret;
4486 map.m_len = len = len - ret;
4487 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4488 inode->i_ctime = current_time(inode);
4489 if (new_size) {
4490 if (epos > new_size)
4491 epos = new_size;
4492 if (ext4_update_inode_size(inode, epos) & 0x1)
4493 inode->i_mtime = inode->i_ctime;
4494 }
4495 ret2 = ext4_mark_inode_dirty(handle, inode);
4496 ext4_update_inode_fsync_trans(handle, inode, 1);
4497 ret3 = ext4_journal_stop(handle);
4498 ret2 = ret3 ? ret3 : ret2;
4499 if (unlikely(ret2))
4500 break;
4501 }
4502 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4503 goto retry;
4504
4505 return ret > 0 ? ret2 : ret;
4506 }
4507
4508 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
4509
4510 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
4511
4512 static long ext4_zero_range(struct file *file, loff_t offset,
4513 loff_t len, int mode)
4514 {
4515 struct inode *inode = file_inode(file);
4516 struct address_space *mapping = file->f_mapping;
4517 handle_t *handle = NULL;
4518 unsigned int max_blocks;
4519 loff_t new_size = 0;
4520 int ret = 0;
4521 int flags;
4522 int credits;
4523 int partial_begin, partial_end;
4524 loff_t start, end;
4525 ext4_lblk_t lblk;
4526 unsigned int blkbits = inode->i_blkbits;
4527
4528 trace_ext4_zero_range(inode, offset, len, mode);
4529
4530 /* Call ext4_force_commit to flush all data in case of data=journal. */
4531 if (ext4_should_journal_data(inode)) {
4532 ret = ext4_force_commit(inode->i_sb);
4533 if (ret)
4534 return ret;
4535 }
4536
4537 /*
4538 * Round up the start of the range and round down its end. This is
4539 * not plain fallocate: we need to zero out blocks, so convert the
4540 * block-aligned interior of the range to unwritten extents and
4541 * possibly manually zero out the unaligned parts of the range.
4542 */
4543 start = round_up(offset, 1 << blkbits);
4544 end = round_down((offset + len), 1 << blkbits);
4545
4546 if (start < offset || end > offset + len)
4547 return -EINVAL;
4548 partial_begin = offset & ((1 << blkbits) - 1);
4549 partial_end = (offset + len) & ((1 << blkbits) - 1);
4550
4551 lblk = start >> blkbits;
4552 max_blocks = (end >> blkbits);
4553 if (max_blocks < lblk)
4554 max_blocks = 0;
4555 else
4556 max_blocks -= lblk;
4557
4558 inode_lock(inode);
4559
4560 /*
4561 * Indirect files do not support unwritten extents
4562 */
4563 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4564 ret = -EOPNOTSUPP;
4565 goto out_mutex;
4566 }
4567
4568 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4569 (offset + len > inode->i_size ||
4570 offset + len > EXT4_I(inode)->i_disksize)) {
4571 new_size = offset + len;
4572 ret = inode_newsize_ok(inode, new_size);
4573 if (ret)
4574 goto out_mutex;
4575 }
4576
4577 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4578
4579 /* Wait for all existing DIO workers; newcomers will block on i_rwsem */
4580 inode_dio_wait(inode);
4581
4582 ret = file_modified(file);
4583 if (ret)
4584 goto out_mutex;
4585
4586 /* Preallocate the range including the unaligned edges */
4587 if (partial_begin || partial_end) {
4588 ret = ext4_alloc_file_blocks(file,
4589 round_down(offset, 1 << blkbits) >> blkbits,
4590 (round_up((offset + len), 1 << blkbits) -
4591 round_down(offset, 1 << blkbits)) >> blkbits,
4592 new_size, flags);
4593 if (ret)
4594 goto out_mutex;
4595
4596 }
4597
4598 /* Zero range excluding the unaligned edges */
4599 if (max_blocks > 0) {
4600 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4601 EXT4_EX_NOCACHE);
4602
4603 /*
4604 * Prevent page faults from reinstantiating pages we have
4605 * released from page cache.
4606 */
4607 filemap_invalidate_lock(mapping);
4608
4609 ret = ext4_break_layouts(inode);
4610 if (ret) {
4611 filemap_invalidate_unlock(mapping);
4612 goto out_mutex;
4613 }
4614
4615 ret = ext4_update_disksize_before_punch(inode, offset, len);
4616 if (ret) {
4617 filemap_invalidate_unlock(mapping);
4618 goto out_mutex;
4619 }
4620 /* Now release the pages and zero block aligned part of pages */
4621 truncate_pagecache_range(inode, start, end - 1);
4622 inode->i_mtime = inode->i_ctime = current_time(inode);
4623
4624 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4625 flags);
4626 filemap_invalidate_unlock(mapping);
4627 if (ret)
4628 goto out_mutex;
4629 }
4630 if (!partial_begin && !partial_end)
4631 goto out_mutex;
4632
4633 /*
4634 * In worst case we have to writeout two nonadjacent unwritten
4635 * blocks and update the inode
4636 */
4637 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4638 if (ext4_should_journal_data(inode))
4639 credits += 2;
4640 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4641 if (IS_ERR(handle)) {
4642 ret = PTR_ERR(handle);
4643 ext4_std_error(inode->i_sb, ret);
4644 goto out_mutex;
4645 }
4646
4647 inode->i_mtime = inode->i_ctime = current_time(inode);
4648 if (new_size)
4649 ext4_update_inode_size(inode, new_size);
4650 ret = ext4_mark_inode_dirty(handle, inode);
4651 if (unlikely(ret))
4652 goto out_handle;
4653 /* Zero out partial block at the edges of the range */
4654 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4655 if (ret >= 0)
4656 ext4_update_inode_fsync_trans(handle, inode, 1);
4657
4658 if (file->f_flags & O_SYNC)
4659 ext4_handle_sync(handle);
4660
4661 out_handle:
4662 ext4_journal_stop(handle);
4663 out_mutex:
4664 inode_unlock(inode);
4665 return ret;
4666 }
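/*
 * Userspace view (illustrative sketch, not part of the original source):
 * on a 4 KiB-block file system, zeroing 4096 bytes at offset 10 gives
 * partial_begin = partial_end = 10 and max_blocks = 0, so both edge
 * blocks are zeroed manually and no whole block is converted:
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int err = fallocate(fd, FALLOC_FL_ZERO_RANGE, 10, 4096);
 */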
4667
4668 /*
4669 * preallocate space for a file. This implements ext4's fallocate file
4670 * operation, which gets called from sys_fallocate system call.
4671 * For block-mapped files, posix_fallocate should fall back to the method
4672 * of writing zeroes to the required new blocks (the same behavior which is
4673 * expected for file systems which do not support fallocate() system call).
4674 */
4675 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4676 {
4677 struct inode *inode = file_inode(file);
4678 loff_t new_size = 0;
4679 unsigned int max_blocks;
4680 int ret = 0;
4681 int flags;
4682 ext4_lblk_t lblk;
4683 unsigned int blkbits = inode->i_blkbits;
4684
4685 /*
4686 * Encrypted inodes can't handle collapse range or insert
4687 * range since we would need to re-encrypt blocks with a
4688 * different IV or XTS tweak (which are based on the logical
4689 * block number).
4690 */
4691 if (IS_ENCRYPTED(inode) &&
4692 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4693 return -EOPNOTSUPP;
4694
4695 /* Return error if mode is not supported */
4696 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4697 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4698 FALLOC_FL_INSERT_RANGE))
4699 return -EOPNOTSUPP;
4700
4701 inode_lock(inode);
4702 ret = ext4_convert_inline_data(inode);
4703 inode_unlock(inode);
4704 if (ret)
4705 goto exit;
4706
4707 if (mode & FALLOC_FL_PUNCH_HOLE) {
4708 ret = ext4_punch_hole(file, offset, len);
4709 goto exit;
4710 }
4711
4712 if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4713 ret = ext4_collapse_range(file, offset, len);
4714 goto exit;
4715 }
4716
4717 if (mode & FALLOC_FL_INSERT_RANGE) {
4718 ret = ext4_insert_range(file, offset, len);
4719 goto exit;
4720 }
4721
4722 if (mode & FALLOC_FL_ZERO_RANGE) {
4723 ret = ext4_zero_range(file, offset, len, mode);
4724 goto exit;
4725 }
4726 trace_ext4_fallocate_enter(inode, offset, len, mode);
4727 lblk = offset >> blkbits;
4728
4729 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4730 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4731
4732 inode_lock(inode);
4733
4734 /*
4735 * We only support preallocation for extent-based files
4736 */
4737 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4738 ret = -EOPNOTSUPP;
4739 goto out;
4740 }
4741
4742 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4743 (offset + len > inode->i_size ||
4744 offset + len > EXT4_I(inode)->i_disksize)) {
4745 new_size = offset + len;
4746 ret = inode_newsize_ok(inode, new_size);
4747 if (ret)
4748 goto out;
4749 }
4750
4751 /* Wait for all existing DIO workers; newcomers will block on i_rwsem */
4752 inode_dio_wait(inode);
4753
4754 ret = file_modified(file);
4755 if (ret)
4756 goto out;
4757
4758 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4759 if (ret)
4760 goto out;
4761
4762 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4763 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4764 EXT4_I(inode)->i_sync_tid);
4765 }
4766 out:
4767 inode_unlock(inode);
4768 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4769 exit:
4770 return ret;
4771 }
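/*
 * Userspace view (illustrative sketch, not part of the original source):
 * plain preallocation that keeps i_size unchanged; this takes the default
 * path above into ext4_alloc_file_blocks() with unwritten extents:
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// reserve 1 MiB at offset 0 without changing the file size
 *	int err = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 */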
4772
4773 /*
4774 * This function converts a range of blocks to written extents.
4775 * The caller passes the start offset and the size; all unwritten
4776 * extents within this range will be converted to written
4777 * extents.
4778 *
4779 * This function is called from the direct IO end io call back
4780 * function, to convert the fallocated extents after IO is completed.
4781 * Returns 0 on success.
4782 */
4783 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4784 loff_t offset, ssize_t len)
4785 {
4786 unsigned int max_blocks;
4787 int ret = 0, ret2 = 0, ret3 = 0;
4788 struct ext4_map_blocks map;
4789 unsigned int blkbits = inode->i_blkbits;
4790 unsigned int credits = 0;
4791
4792 map.m_lblk = offset >> blkbits;
4793 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4794
4795 if (!handle) {
4796 /*
4797 * credits to insert 1 extent into extent tree
4798 */
4799 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4800 }
4801 while (ret >= 0 && ret < max_blocks) {
4802 map.m_lblk += ret;
4803 map.m_len = (max_blocks -= ret);
4804 if (credits) {
4805 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4806 credits);
4807 if (IS_ERR(handle)) {
4808 ret = PTR_ERR(handle);
4809 break;
4810 }
4811 }
4812 ret = ext4_map_blocks(handle, inode, &map,
4813 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4814 if (ret <= 0)
4815 ext4_warning(inode->i_sb,
4816 "inode #%lu: block %u: len %u: "
4817 "ext4_ext_map_blocks returned %d",
4818 inode->i_ino, map.m_lblk,
4819 map.m_len, ret);
4820 ret2 = ext4_mark_inode_dirty(handle, inode);
4821 if (credits) {
4822 ret3 = ext4_journal_stop(handle);
4823 if (unlikely(ret3))
4824 ret2 = ret3;
4825 }
4826
4827 if (ret <= 0 || ret2)
4828 break;
4829 }
4830 return ret > 0 ? ret2 : ret;
4831 }
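/*
 * Worked example (illustrative, not part of the original source):
 * converting 1 MiB at offset 0 on a 4 KiB-block file system gives
 * max_blocks = 256. If each ext4_map_blocks() call converts 64 blocks,
 * the loop advances map.m_lblk by 64 per round and finishes after four
 * iterations, or earlier on the first error.
 */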
4832
4833 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4834 {
4835 int ret = 0, err = 0;
4836 struct ext4_io_end_vec *io_end_vec;
4837
4838 /*
4839 * This is somewhat ugly but the idea is clear: when a transaction is
4840 * reserved, everything goes into it. Otherwise we prefer to start
4841 * several smaller transactions, converting each extent separately.
4842 */
4843 if (handle) {
4844 handle = ext4_journal_start_reserved(handle,
4845 EXT4_HT_EXT_CONVERT);
4846 if (IS_ERR(handle))
4847 return PTR_ERR(handle);
4848 }
4849
4850 list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4851 ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4852 io_end_vec->offset,
4853 io_end_vec->size);
4854 if (ret)
4855 break;
4856 }
4857
4858 if (handle)
4859 err = ext4_journal_stop(handle);
4860
4861 return ret < 0 ? ret : err;
4862 }
4863
4864 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4865 {
4866 __u64 physical = 0;
4867 __u64 length = 0;
4868 int blockbits = inode->i_sb->s_blocksize_bits;
4869 int error = 0;
4870 u16 iomap_type;
4871
4872 /* in-inode? */
4873 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4874 struct ext4_iloc iloc;
4875 int offset; /* offset of xattr in inode */
4876
4877 error = ext4_get_inode_loc(inode, &iloc);
4878 if (error)
4879 return error;
4880 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4881 offset = EXT4_GOOD_OLD_INODE_SIZE +
4882 EXT4_I(inode)->i_extra_isize;
4883 physical += offset;
4884 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4885 brelse(iloc.bh);
4886 iomap_type = IOMAP_INLINE;
4887 } else if (EXT4_I(inode)->i_file_acl) { /* external block */
4888 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4889 length = inode->i_sb->s_blocksize;
4890 iomap_type = IOMAP_MAPPED;
4891 } else {
4892 /* no in-inode or external block for xattr, so return -ENOENT */
4893 error = -ENOENT;
4894 goto out;
4895 }
4896
4897 iomap->addr = physical;
4898 iomap->offset = 0;
4899 iomap->length = length;
4900 iomap->type = iomap_type;
4901 iomap->flags = 0;
4902 out:
4903 return error;
4904 }
4905
4906 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4907 loff_t length, unsigned flags,
4908 struct iomap *iomap, struct iomap *srcmap)
4909 {
4910 int error;
4911
4912 error = ext4_iomap_xattr_fiemap(inode, iomap);
4913 if (error == 0 && (offset >= iomap->length))
4914 error = -ENOENT;
4915 return error;
4916 }
4917
4918 static const struct iomap_ops ext4_iomap_xattr_ops = {
4919 .iomap_begin = ext4_iomap_xattr_begin,
4920 };
4921
4922 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4923 {
4924 u64 maxbytes;
4925
4926 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4927 maxbytes = inode->i_sb->s_maxbytes;
4928 else
4929 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
4930
4931 if (*len == 0)
4932 return -EINVAL;
4933 if (start > maxbytes)
4934 return -EFBIG;
4935
4936 /*
4937 * Shrink request scope to what the fs can actually handle.
4938 */
4939 if (*len > maxbytes || (maxbytes - *len) < start)
4940 *len = maxbytes - start;
4941 return 0;
4942 }
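/*
 * Worked example (illustrative, not part of the original source): if
 * maxbytes were 1000 bytes and the caller asked for start = 900 with
 * *len = 400, the request would run past the limit, so *len is clamped
 * to maxbytes - start = 100 before the extent walk begins.
 */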
4943
4944 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4945 u64 start, u64 len)
4946 {
4947 int error = 0;
4948
4949 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4950 error = ext4_ext_precache(inode);
4951 if (error)
4952 return error;
4953 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4954 }
4955
4956 /*
4957 * For bitmap files the maximum size limit could be smaller than
4958 * s_maxbytes, so check len here manually instead of just relying on the
4959 * generic check.
4960 */
4961 error = ext4_fiemap_check_ranges(inode, start, &len);
4962 if (error)
4963 return error;
4964
4965 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4966 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
4967 return iomap_fiemap(inode, fieinfo, start, len,
4968 &ext4_iomap_xattr_ops);
4969 }
4970
4971 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
4972 }
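/*
 * Userspace view (illustrative sketch, not part of the original source):
 * the FIEMAP ioctl reaches ext4_fiemap() through the VFS:
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   8 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 8;
 *	int err = ioctl(fd, FS_IOC_FIEMAP, fm);
 */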
4973
4974 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
4975 __u64 start, __u64 len)
4976 {
4977 ext4_lblk_t start_blk, len_blks;
4978 __u64 last_blk;
4979 int error = 0;
4980
4981 if (ext4_has_inline_data(inode)) {
4982 int has_inline;
4983
4984 down_read(&EXT4_I(inode)->xattr_sem);
4985 has_inline = ext4_has_inline_data(inode);
4986 up_read(&EXT4_I(inode)->xattr_sem);
4987 if (has_inline)
4988 return 0;
4989 }
4990
4991 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
4992 error = ext4_ext_precache(inode);
4993 if (error)
4994 return error;
4995 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
4996 }
4997
4998 error = fiemap_prep(inode, fieinfo, start, &len, 0);
4999 if (error)
5000 return error;
5001
5002 error = ext4_fiemap_check_ranges(inode, start, &len);
5003 if (error)
5004 return error;
5005
5006 start_blk = start >> inode->i_sb->s_blocksize_bits;
5007 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5008 if (last_blk >= EXT_MAX_BLOCKS)
5009 last_blk = EXT_MAX_BLOCKS-1;
5010 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5011
5012 /*
5013 * Walk the extent tree gathering extent information
5014 * and pushing extents back to the user.
5015 */
5016 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
5017 }
5018
5019 /*
5020 * ext4_ext_shift_path_extents:
5021 * Shift the extents of a path structure lying between path[depth].p_ext
5022 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5023 * whether it is a right or a left shift operation.
5024 */
5025 static int
5026 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5027 struct inode *inode, handle_t *handle,
5028 enum SHIFT_DIRECTION SHIFT)
5029 {
5030 int depth, err = 0;
5031 struct ext4_extent *ex_start, *ex_last;
5032 bool update = false;
5033 int credits, restart_credits;
5034 depth = path->p_depth;
5035
5036 while (depth >= 0) {
5037 if (depth == path->p_depth) {
5038 ex_start = path[depth].p_ext;
5039 if (!ex_start)
5040 return -EFSCORRUPTED;
5041
5042 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5043 /* leaf + sb + inode */
5044 credits = 3;
5045 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5046 update = true;
5047 /* extent tree + sb + inode */
5048 credits = depth + 2;
5049 }
5050
5051 restart_credits = ext4_writepage_trans_blocks(inode);
5052 err = ext4_datasem_ensure_credits(handle, inode, credits,
5053 restart_credits, 0);
5054 if (err) {
5055 if (err > 0)
5056 err = -EAGAIN;
5057 goto out;
5058 }
5059
5060 err = ext4_ext_get_access(handle, inode, path + depth);
5061 if (err)
5062 goto out;
5063
5064 while (ex_start <= ex_last) {
5065 if (SHIFT == SHIFT_LEFT) {
5066 le32_add_cpu(&ex_start->ee_block,
5067 -shift);
5068 /* Try to merge to the left. */
5069 if ((ex_start >
5070 EXT_FIRST_EXTENT(path[depth].p_hdr))
5071 &&
5072 ext4_ext_try_to_merge_right(inode,
5073 path, ex_start - 1))
5074 ex_last--;
5075 else
5076 ex_start++;
5077 } else {
5078 le32_add_cpu(&ex_last->ee_block, shift);
5079 ext4_ext_try_to_merge_right(inode, path,
5080 ex_last);
5081 ex_last--;
5082 }
5083 }
5084 err = ext4_ext_dirty(handle, inode, path + depth);
5085 if (err)
5086 goto out;
5087
5088 if (--depth < 0 || !update)
5089 break;
5090 }
5091
5092 /* Update index too */
5093 err = ext4_ext_get_access(handle, inode, path + depth);
5094 if (err)
5095 goto out;
5096
5097 if (SHIFT == SHIFT_LEFT)
5098 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5099 else
5100 le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5101 err = ext4_ext_dirty(handle, inode, path + depth);
5102 if (err)
5103 goto out;
5104
5105 /* we are done if current index is not a starting index */
5106 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5107 break;
5108
5109 depth--;
5110 }
5111
5112 out:
5113 return err;
5114 }
5115
5116 /*
5117 * ext4_ext_shift_extents:
5118 * All the extents which lie in the range from @start to the last allocated
5119 * block for the @inode are shifted either left or right (depending
5120 * on @SHIFT) by @shift blocks.
5121 * On success, 0 is returned, error otherwise.
5122 */
5123 static int
5124 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5125 ext4_lblk_t start, ext4_lblk_t shift,
5126 enum SHIFT_DIRECTION SHIFT)
5127 {
5128 struct ext4_ext_path *path;
5129 int ret = 0, depth;
5130 struct ext4_extent *extent;
5131 ext4_lblk_t stop, *iterator, ex_start, ex_end;
5132 ext4_lblk_t tmp = EXT_MAX_BLOCKS;
5133
5134 /* Let path point to the last extent */
5135 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5136 EXT4_EX_NOCACHE);
5137 if (IS_ERR(path))
5138 return PTR_ERR(path);
5139
5140 depth = path->p_depth;
5141 extent = path[depth].p_ext;
5142 if (!extent)
5143 goto out;
5144
5145 stop = le32_to_cpu(extent->ee_block);
5146
5147 /*
5148 * For left shifts, make sure the hole on the left is big enough to
5149 * accommodate the shift. For right shifts, make sure the last extent
5150 * won't be shifted beyond EXT_MAX_BLOCKS.
5151 */
5152 if (SHIFT == SHIFT_LEFT) {
5153 path = ext4_find_extent(inode, start - 1, &path,
5154 EXT4_EX_NOCACHE);
5155 if (IS_ERR(path))
5156 return PTR_ERR(path);
5157 depth = path->p_depth;
5158 extent = path[depth].p_ext;
5159 if (extent) {
5160 ex_start = le32_to_cpu(extent->ee_block);
5161 ex_end = le32_to_cpu(extent->ee_block) +
5162 ext4_ext_get_actual_len(extent);
5163 } else {
5164 ex_start = 0;
5165 ex_end = 0;
5166 }
5167
5168 if ((start == ex_start && shift > ex_start) ||
5169 (shift > start - ex_end)) {
5170 ret = -EINVAL;
5171 goto out;
5172 }
5173 } else {
5174 if (shift > EXT_MAX_BLOCKS -
5175 (stop + ext4_ext_get_actual_len(extent))) {
5176 ret = -EINVAL;
5177 goto out;
5178 }
5179 }
5180
5181 /*
5182 * In case of left shift, iterator points to start and it is increased
5183 * till we reach stop. In case of right shift, iterator points to stop
5184 * and it is decreased till we reach start.
5185 */
5186 again:
5187 ret = 0;
5188 if (SHIFT == SHIFT_LEFT)
5189 iterator = &start;
5190 else
5191 iterator = &stop;
5192
5193 if (tmp != EXT_MAX_BLOCKS)
5194 *iterator = tmp;
5195
5196 /*
5197 * It's safe to start updating extents. Start and stop are unsigned, so
5198 * in case of a right shift, if an extent with block 0 is reached, iterator
5199 * becomes NULL to indicate the end of the loop.
5200 */
5201 while (iterator && start <= stop) {
5202 path = ext4_find_extent(inode, *iterator, &path,
5203 EXT4_EX_NOCACHE);
5204 if (IS_ERR(path))
5205 return PTR_ERR(path);
5206 depth = path->p_depth;
5207 extent = path[depth].p_ext;
5208 if (!extent) {
5209 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5210 (unsigned long) *iterator);
5211 return -EFSCORRUPTED;
5212 }
5213 if (SHIFT == SHIFT_LEFT && *iterator >
5214 le32_to_cpu(extent->ee_block)) {
5215 /* Hole, move to the next extent */
5216 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5217 path[depth].p_ext++;
5218 } else {
5219 *iterator = ext4_ext_next_allocated_block(path);
5220 continue;
5221 }
5222 }
5223
5224 tmp = *iterator;
5225 if (SHIFT == SHIFT_LEFT) {
5226 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5227 *iterator = le32_to_cpu(extent->ee_block) +
5228 ext4_ext_get_actual_len(extent);
5229 } else {
5230 extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5231 if (le32_to_cpu(extent->ee_block) > start)
5232 *iterator = le32_to_cpu(extent->ee_block) - 1;
5233 else if (le32_to_cpu(extent->ee_block) == start)
5234 iterator = NULL;
5235 else {
5236 extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5237 while (le32_to_cpu(extent->ee_block) >= start)
5238 extent--;
5239
5240 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5241 break;
5242
5243 extent++;
5244 iterator = NULL;
5245 }
5246 path[depth].p_ext = extent;
5247 }
5248 ret = ext4_ext_shift_path_extents(path, shift, inode,
5249 handle, SHIFT);
5250 /* iterator can be NULL which means we should break */
5251 if (ret == -EAGAIN)
5252 goto again;
5253 if (ret)
5254 break;
5255 }
5256 out:
5257 ext4_free_ext_path(path);
5258 return ret;
5259 }
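/*
 * Worked example (illustrative, not part of the original source): after a
 * collapse punches out logical blocks 100-149, this is called with
 * start = 150, shift = 50 and SHIFT_LEFT. An extent whose ee_block was
 * 200 ends up at 150, and the index entries above it are updated so the
 * tree remains consistent.
 */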
5260
5261 /*
5262 * ext4_collapse_range:
5263 * This implements fallocate's collapse range functionality for ext4.
5264 * Returns 0 on success and non-zero on error.
5265 */
5266 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
5267 {
5268 struct inode *inode = file_inode(file);
5269 struct super_block *sb = inode->i_sb;
5270 struct address_space *mapping = inode->i_mapping;
5271 ext4_lblk_t punch_start, punch_stop;
5272 handle_t *handle;
5273 unsigned int credits;
5274 loff_t new_size, ioffset;
5275 int ret;
5276
5277 /*
5278 * We need to test this early because xfstests assumes that a
5279 * collapse range of (0, 1) will return EOPNOTSUPP if the file
5280 * system does not support collapse range.
5281 */
5282 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5283 return -EOPNOTSUPP;
5284
5285 /* Collapse range works only on fs cluster size aligned regions. */
5286 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5287 return -EINVAL;
5288
5289 trace_ext4_collapse_range(inode, offset, len);
5290
5291 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5292 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5293
5294 /* Call ext4_force_commit to flush all data in case of data=journal. */
5295 if (ext4_should_journal_data(inode)) {
5296 ret = ext4_force_commit(inode->i_sb);
5297 if (ret)
5298 return ret;
5299 }
5300
5301 inode_lock(inode);
5302 /*
5303 * The collapse range must not reach or cross EOF; that would
5304 * effectively be a truncate operation
5305 */
5306 if (offset + len >= inode->i_size) {
5307 ret = -EINVAL;
5308 goto out_mutex;
5309 }
5310
5311 /* Currently just for extent based files */
5312 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5313 ret = -EOPNOTSUPP;
5314 goto out_mutex;
5315 }
5316
5317 /* Wait for existing dio to complete */
5318 inode_dio_wait(inode);
5319
5320 ret = file_modified(file);
5321 if (ret)
5322 goto out_mutex;
5323
5324 /*
5325 * Prevent page faults from reinstantiating pages we have released from
5326 * page cache.
5327 */
5328 filemap_invalidate_lock(mapping);
5329
5330 ret = ext4_break_layouts(inode);
5331 if (ret)
5332 goto out_mmap;
5333
5334 /*
5335 * Need to round down offset to be aligned with page size boundary
5336 * for page size > block size.
5337 */
5338 ioffset = round_down(offset, PAGE_SIZE);
5339 /*
5340 * Write tail of the last page before removed range since it will get
5341 * removed from the page cache below.
5342 */
5343 ret = filemap_write_and_wait_range(mapping, ioffset, offset);
5344 if (ret)
5345 goto out_mmap;
5346 /*
5347 * Write out the data that will be shifted, to preserve it when the
5348 * page cache is discarded below. We are also protected from pages
5349 * becoming dirty by i_rwsem and the invalidate_lock.
5350 */
5351 ret = filemap_write_and_wait_range(mapping, offset + len,
5352 LLONG_MAX);
5353 if (ret)
5354 goto out_mmap;
5355 truncate_pagecache(inode, ioffset);
5356
5357 credits = ext4_writepage_trans_blocks(inode);
5358 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5359 if (IS_ERR(handle)) {
5360 ret = PTR_ERR(handle);
5361 goto out_mmap;
5362 }
5363 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5364
5365 down_write(&EXT4_I(inode)->i_data_sem);
5366 ext4_discard_preallocations(inode, 0);
5367
5368 ret = ext4_es_remove_extent(inode, punch_start,
5369 EXT_MAX_BLOCKS - punch_start);
5370 if (ret) {
5371 up_write(&EXT4_I(inode)->i_data_sem);
5372 goto out_stop;
5373 }
5374
5375 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5376 if (ret) {
5377 up_write(&EXT4_I(inode)->i_data_sem);
5378 goto out_stop;
5379 }
5380 ext4_discard_preallocations(inode, 0);
5381
5382 ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5383 punch_stop - punch_start, SHIFT_LEFT);
5384 if (ret) {
5385 up_write(&EXT4_I(inode)->i_data_sem);
5386 goto out_stop;
5387 }
5388
5389 new_size = inode->i_size - len;
5390 i_size_write(inode, new_size);
5391 EXT4_I(inode)->i_disksize = new_size;
5392
5393 up_write(&EXT4_I(inode)->i_data_sem);
5394 if (IS_SYNC(inode))
5395 ext4_handle_sync(handle);
5396 inode->i_mtime = inode->i_ctime = current_time(inode);
5397 ret = ext4_mark_inode_dirty(handle, inode);
5398 ext4_update_inode_fsync_trans(handle, inode, 1);
5399
5400 out_stop:
5401 ext4_journal_stop(handle);
5402 out_mmap:
5403 filemap_invalidate_unlock(mapping);
5404 out_mutex:
5405 inode_unlock(inode);
5406 return ret;
5407 }
5408
5409 /*
5410 * ext4_insert_range:
5411 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5412 * The data blocks starting from @offset to the EOF are shifted by @len
5413 * towards right to create a hole in the @inode. Inode size is increased
5414 * by len bytes.
5415 * Returns 0 on success, error otherwise.
5416 */
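/*
 * For illustration only, a minimal userspace sketch (the file name and
 * values are assumptions): insert range is invoked through fallocate(2)
 * with FALLOC_FL_INSERT_RANGE.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("data", O_RDWR);
 *	// Open a 4 KiB hole at offset 4096; data from 4096 on moves right
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 4096);
 *
 * Unlike collapse, the inserted hole grows i_size by len.
 */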
5417 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
5418 {
5419 struct inode *inode = file_inode(file);
5420 struct super_block *sb = inode->i_sb;
5421 struct address_space *mapping = inode->i_mapping;
5422 handle_t *handle;
5423 struct ext4_ext_path *path;
5424 struct ext4_extent *extent;
5425 ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5426 unsigned int credits, ee_len;
5427 int ret = 0, depth, split_flag = 0;
5428 loff_t ioffset;
5429
5430 /*
5431 * We need to test this early because xfstests assumes that an
5432 * insert range of (0, 1) will return EOPNOTSUPP if the file
5433 * system does not support insert range.
5434 */
5435 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5436 return -EOPNOTSUPP;
5437
5438 /* Insert range works only on fs cluster size aligned regions. */
5439 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5440 return -EINVAL;
5441
5442 trace_ext4_insert_range(inode, offset, len);
5443
5444 offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5445 len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5446
5447 /* Call ext4_force_commit to flush all data in case of data=journal */
5448 if (ext4_should_journal_data(inode)) {
5449 ret = ext4_force_commit(inode->i_sb);
5450 if (ret)
5451 return ret;
5452 }
5453
5454 inode_lock(inode);
5455 /* Currently just for extent based files */
5456 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5457 ret = -EOPNOTSUPP;
5458 goto out_mutex;
5459 }
5460
5461 /* Check whether the maximum file size would be exceeded */
5462 if (len > inode->i_sb->s_maxbytes - inode->i_size) {
5463 ret = -EFBIG;
5464 goto out_mutex;
5465 }
5466
5467 /* Offset must be less than i_size */
5468 if (offset >= inode->i_size) {
5469 ret = -EINVAL;
5470 goto out_mutex;
5471 }
5472
5473 /* Wait for existing dio to complete */
5474 inode_dio_wait(inode);
5475
5476 ret = file_modified(file);
5477 if (ret)
5478 goto out_mutex;
5479
5480 /*
5481 * Prevent page faults from reinstantiating pages we have released from
5482 * page cache.
5483 */
5484 filemap_invalidate_lock(mapping);
5485
5486 ret = ext4_break_layouts(inode);
5487 if (ret)
5488 goto out_mmap;
5489
5490 /*
5491 * Need to round down to align start offset to page size boundary
5492 * for page size > block size.
5493 */
5494 ioffset = round_down(offset, PAGE_SIZE);
5495 /* Write out all dirty pages */
5496 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5497 LLONG_MAX);
5498 if (ret)
5499 goto out_mmap;
5500 truncate_pagecache(inode, ioffset);
5501
5502 credits = ext4_writepage_trans_blocks(inode);
5503 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5504 if (IS_ERR(handle)) {
5505 ret = PTR_ERR(handle);
5506 goto out_mmap;
5507 }
5508 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5509
5510 /* Expand the file to avoid data loss if there is an error while shifting */
5511 inode->i_size += len;
5512 EXT4_I(inode)->i_disksize += len;
5513 inode->i_mtime = inode->i_ctime = current_time(inode);
5514 ret = ext4_mark_inode_dirty(handle, inode);
5515 if (ret)
5516 goto out_stop;
5517
5518 down_write(&EXT4_I(inode)->i_data_sem);
5519 ext4_discard_preallocations(inode, 0);
5520
5521 path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5522 if (IS_ERR(path)) {
	ret = PTR_ERR(path);
5523 up_write(&EXT4_I(inode)->i_data_sem);
5524 goto out_stop;
5525 }
5526
5527 depth = ext_depth(inode);
5528 extent = path[depth].p_ext;
5529 if (extent) {
5530 ee_start_lblk = le32_to_cpu(extent->ee_block);
5531 ee_len = ext4_ext_get_actual_len(extent);
5532
5533 /*
5534 * If offset_lblk is not the starting block of the extent, split
5535 * the extent at @offset_lblk
5536 */
5537 if ((offset_lblk > ee_start_lblk) &&
5538 (offset_lblk < (ee_start_lblk + ee_len))) {
5539 if (ext4_ext_is_unwritten(extent))
5540 split_flag = EXT4_EXT_MARK_UNWRIT1 |
5541 EXT4_EXT_MARK_UNWRIT2;
5542 ret = ext4_split_extent_at(handle, inode, &path,
5543 offset_lblk, split_flag,
5544 EXT4_EX_NOCACHE |
5545 EXT4_GET_BLOCKS_PRE_IO |
5546 EXT4_GET_BLOCKS_METADATA_NOFAIL);
5547 }
5548
5549 ext4_free_ext_path(path);
5550 if (ret < 0) {
5551 up_write(&EXT4_I(inode)->i_data_sem);
5552 goto out_stop;
5553 }
5554 } else {
5555 ext4_free_ext_path(path);
5556 }
5557
5558 ret = ext4_es_remove_extent(inode, offset_lblk,
5559 EXT_MAX_BLOCKS - offset_lblk);
5560 if (ret) {
5561 up_write(&EXT4_I(inode)->i_data_sem);
5562 goto out_stop;
5563 }
5564
5565 /*
5566 * If offset_lblk lies in a hole at the start of the file, use
5567 * ee_start_lblk to shift extents
5568 */
5569 ret = ext4_ext_shift_extents(inode, handle,
5570 ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
5571 len_lblk, SHIFT_RIGHT);
5572
5573 up_write(&EXT4_I(inode)->i_data_sem);
5574 if (IS_SYNC(inode))
5575 ext4_handle_sync(handle);
5576 if (ret >= 0)
5577 ext4_update_inode_fsync_trans(handle, inode, 1);
5578
5579 out_stop:
5580 ext4_journal_stop(handle);
5581 out_mmap:
5582 filemap_invalidate_unlock(mapping);
5583 out_mutex:
5584 inode_unlock(inode);
5585 return ret;
5586 }
5587
5588 /**
5589 * ext4_swap_extents() - Swap extents between two inodes
5590 * @handle: handle for this transaction
5591 * @inode1: First inode
5592 * @inode2: Second inode
5593 * @lblk1: Start block for first inode
5594 * @lblk2: Start block for second inode
5595 * @count: Number of blocks to swap
5596 * @unwritten: Mark second inode's extents as unwritten after swap
5597 * @erp: Pointer to save error value
5598 *
5599 * This helper routine does exactly what its name promises: it swaps extents.
5600 * Everything else, such as page-cache locking consistency, bh mapping
5601 * consistency or copying the extents' data, must be performed by the caller.
5602 * Locking:
5603 * i_rwsem is held for both inodes
5604 * i_data_sem is locked for write for both inodes
5605 * Assumptions:
5606 * All pages from requested range are locked for both inodes
5607 */
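/*
 * Illustrative caller sketch (assumed for this comment; the identifiers
 * below are placeholders): the online-defrag code in move_extent.c is
 * the kind of caller this interface expects. Roughly:
 *
 *	int err = 0;
 *	ext4_lblk_t replaced;
 *
 *	// i_rwsem and i_data_sem held for both inodes, pages locked
 *	replaced = ext4_swap_extents(handle, inode1, inode2,
 *				     lblk1, lblk2, count, 1, &err);
 *	if (err)
 *		return err;
 *
 * The return value counts the blocks actually swapped, which may be
 * fewer than @count if an error cut the loop short.
 */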
5608 int
5609 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5610 struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5611 ext4_lblk_t count, int unwritten, int *erp)
5612 {
5613 struct ext4_ext_path *path1 = NULL;
5614 struct ext4_ext_path *path2 = NULL;
5615 int replaced_count = 0;
5616
5617 BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5618 BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5619 BUG_ON(!inode_is_locked(inode1));
5620 BUG_ON(!inode_is_locked(inode2));
5621
5622 *erp = ext4_es_remove_extent(inode1, lblk1, count);
5623 if (unlikely(*erp))
5624 return 0;
5625 *erp = ext4_es_remove_extent(inode2, lblk2, count);
5626 if (unlikely(*erp))
5627 return 0;
5628
5629 while (count) {
5630 struct ext4_extent *ex1, *ex2, tmp_ex;
5631 ext4_lblk_t e1_blk, e2_blk;
5632 int e1_len, e2_len, len;
5633 int split = 0;
5634
5635 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5636 if (IS_ERR(path1)) {
5637 *erp = PTR_ERR(path1);
5638 path1 = NULL;
5639 finish:
5640 count = 0;
5641 goto repeat;
5642 }
5643 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5644 if (IS_ERR(path2)) {
5645 *erp = PTR_ERR(path2);
5646 path2 = NULL;
5647 goto finish;
5648 }
5649 ex1 = path1[path1->p_depth].p_ext;
5650 ex2 = path2[path2->p_depth].p_ext;
5651 /* Do we have something to swap ? */
5652 if (unlikely(!ex2 || !ex1))
5653 goto finish;
5654
5655 e1_blk = le32_to_cpu(ex1->ee_block);
5656 e2_blk = le32_to_cpu(ex2->ee_block);
5657 e1_len = ext4_ext_get_actual_len(ex1);
5658 e2_len = ext4_ext_get_actual_len(ex2);
5659
5660 /* Hole handling */
5661 if (!in_range(lblk1, e1_blk, e1_len) ||
5662 !in_range(lblk2, e2_blk, e2_len)) {
5663 ext4_lblk_t next1, next2;
5664
5665 /* if hole after extent, then go to next extent */
5666 next1 = ext4_ext_next_allocated_block(path1);
5667 next2 = ext4_ext_next_allocated_block(path2);
5668 /* If hole before extent, then shift to that extent */
5669 if (e1_blk > lblk1)
5670 next1 = e1_blk;
5671 if (e2_blk > lblk2)
5672 next2 = e2_blk;
5673 /* Do we have something to swap */
5674 if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5675 goto finish;
5676 /* Move to the rightmost boundary */
5677 len = next1 - lblk1;
5678 if (len < next2 - lblk2)
5679 len = next2 - lblk2;
5680 if (len > count)
5681 len = count;
5682 lblk1 += len;
5683 lblk2 += len;
5684 count -= len;
5685 goto repeat;
5686 }
5687
5688 /* Prepare left boundary */
5689 if (e1_blk < lblk1) {
5690 split = 1;
5691 *erp = ext4_force_split_extent_at(handle, inode1,
5692 &path1, lblk1, 0);
5693 if (unlikely(*erp))
5694 goto finish;
5695 }
5696 if (e2_blk < lblk2) {
5697 split = 1;
5698 *erp = ext4_force_split_extent_at(handle, inode2,
5699 &path2, lblk2, 0);
5700 if (unlikely(*erp))
5701 goto finish;
5702 }
5703 /* ext4_split_extent_at() may result in a leaf extent split,
5704 * so the path must be revalidated. */
5705 if (split)
5706 goto repeat;
5707
5708 /* Prepare right boundary */
5709 len = count;
5710 if (len > e1_blk + e1_len - lblk1)
5711 len = e1_blk + e1_len - lblk1;
5712 if (len > e2_blk + e2_len - lblk2)
5713 len = e2_blk + e2_len - lblk2;
5714
5715 if (len != e1_len) {
5716 split = 1;
5717 *erp = ext4_force_split_extent_at(handle, inode1,
5718 &path1, lblk1 + len, 0);
5719 if (unlikely(*erp))
5720 goto finish;
5721 }
5722 if (len != e2_len) {
5723 split = 1;
5724 *erp = ext4_force_split_extent_at(handle, inode2,
5725 &path2, lblk2 + len, 0);
5726 if (*erp)
5727 goto finish;
5728 }
5729 /* ext4_split_extent_at() may result in a leaf extent split,
5730 * so the path must be revalidated. */
5731 if (split)
5732 goto repeat;
5733
5734 BUG_ON(e2_len != e1_len);
5735 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5736 if (unlikely(*erp))
5737 goto finish;
5738 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5739 if (unlikely(*erp))
5740 goto finish;
5741
5742 /* Both extents are fully inside the boundaries. Swap them now */
5743 tmp_ex = *ex1;
5744 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5745 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5746 ex1->ee_len = cpu_to_le16(e2_len);
5747 ex2->ee_len = cpu_to_le16(e1_len);
5748 if (unwritten)
5749 ext4_ext_mark_unwritten(ex2);
5750 if (ext4_ext_is_unwritten(&tmp_ex))
5751 ext4_ext_mark_unwritten(ex1);
5752
5753 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5754 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5755 *erp = ext4_ext_dirty(handle, inode2, path2 +
5756 path2->p_depth);
5757 if (unlikely(*erp))
5758 goto finish;
5759 *erp = ext4_ext_dirty(handle, inode1, path1 +
5760 path1->p_depth);
5761 /*
5762 * This looks scary: the second inode already points to the new
5763 * blocks and was successfully dirtied. Luckily, an error here can
5764 * only be caused by a journal error, in which case the whole
5765 * transaction will be aborted anyway.
5766 */
5767 if (unlikely(*erp))
5768 goto finish;
5769 lblk1 += len;
5770 lblk2 += len;
5771 replaced_count += len;
5772 count -= len;
5773
5774 repeat:
5775 ext4_free_ext_path(path1);
5776 ext4_free_ext_path(path2);
5777 path1 = path2 = NULL;
5778 }
5779 return replaced_count;
5780 }
5781
5782 /*
5783 * ext4_clu_mapped - determine whether any block in a logical cluster has
5784 * been mapped to a physical cluster
5785 *
5786 * @inode - file containing the logical cluster
5787 * @lclu - logical cluster of interest
5788 *
5789 * Returns 1 if any block in the logical cluster is mapped, signifying
5790 * that a physical cluster has been allocated for it. Otherwise,
5791 * returns 0. Can also return negative error codes. Derived from
5792 * ext4_ext_map_blocks().
5793 */
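/*
 * Worked example (illustrative): with 4K blocks and a bigalloc cluster
 * of 16 blocks, lclu = 3 covers logical blocks [48, 64). The lookup
 * below starts at EXT4_C2B(sbi, 3) = 48, and any extent overlapping
 * [48, 64) makes the cluster count as mapped.
 */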
5794 int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
5795 {
5796 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5797 struct ext4_ext_path *path;
5798 int depth, mapped = 0, err = 0;
5799 struct ext4_extent *extent;
5800 ext4_lblk_t first_lblk, first_lclu, last_lclu;
5801
5802 /*
5803 * if data can be stored inline, the logical cluster isn't
5804 * mapped - no physical clusters have been allocated, and the
5805 * file has no extents
5806 */
5807 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
5808 return 0;
5809
5810 /* search for the extent closest to the first block in the cluster */
5811 path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5812 if (IS_ERR(path)) {
5813 err = PTR_ERR(path);
5814 path = NULL;
5815 goto out;
5816 }
5817
5818 depth = ext_depth(inode);
5819
5820 /*
5821 * A consistent leaf must not be empty. This situation is possible,
5822 * though, _during_ tree modification, and it's why an assert can't
5823 * be put in ext4_find_extent().
5824 */
5825 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5826 EXT4_ERROR_INODE(inode,
5827 "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
5828 (unsigned long) EXT4_C2B(sbi, lclu),
5829 depth, path[depth].p_block);
5830 err = -EFSCORRUPTED;
5831 goto out;
5832 }
5833
5834 extent = path[depth].p_ext;
5835
5836 /* can't be mapped if the extent tree is empty */
5837 if (extent == NULL)
5838 goto out;
5839
5840 first_lblk = le32_to_cpu(extent->ee_block);
5841 first_lclu = EXT4_B2C(sbi, first_lblk);
5842
5843 /*
5844 * Three possible outcomes at this point - found extent spanning
5845 * the target cluster, to the left of the target cluster, or to the
5846 * right of the target cluster. The first two cases are handled here.
5847 * The last case indicates the target cluster is not mapped.
5848 */
5849 if (lclu >= first_lclu) {
5850 last_lclu = EXT4_B2C(sbi, first_lblk +
5851 ext4_ext_get_actual_len(extent) - 1);
5852 if (lclu <= last_lclu) {
5853 mapped = 1;
5854 } else {
5855 first_lblk = ext4_ext_next_allocated_block(path);
5856 first_lclu = EXT4_B2C(sbi, first_lblk);
5857 if (lclu == first_lclu)
5858 mapped = 1;
5859 }
5860 }
5861
5862 out:
5863 ext4_free_ext_path(path);
5864
5865 return err ? err : mapped;
5866 }
5867
5868 /*
5869 * Updates the physical block address and unwritten status of the extent
5870 * starting at logical block @start with length @len. If no such extent
5871 * exists, this function splits the extent tree appropriately to create
5872 * one. This function is called in the fast commit replay path.
5873 * Returns 0 on success and an error on failure.
5874 */
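/*
 * Illustrative example: if the tree holds one extent covering [0, 100)
 * and the replay record describes [10, 20), the code below first splits
 * at block 10 and then at block 20, leaving [10, 20) as a standalone
 * extent whose pblk and unwritten bit can then be rewritten in place.
 */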
5875 int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
5876 int len, int unwritten, ext4_fsblk_t pblk)
5877 {
5878 struct ext4_ext_path *path = NULL, *ppath;
5879 struct ext4_extent *ex;
5880 int ret;
5881
5882 path = ext4_find_extent(inode, start, NULL, 0);
5883 if (IS_ERR(path))
5884 return PTR_ERR(path);
5885 ex = path[path->p_depth].p_ext;
5886 if (!ex) {
5887 ret = -EFSCORRUPTED;
5888 goto out;
5889 }
5890
5891 if (le32_to_cpu(ex->ee_block) != start ||
5892 ext4_ext_get_actual_len(ex) != len) {
5893 /* We need to split this extent to match our extent first */
5894 ppath = path;
5895 down_write(&EXT4_I(inode)->i_data_sem);
5896 ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
5897 up_write(&EXT4_I(inode)->i_data_sem);
5898 if (ret)
5899 goto out;
5900 kfree(path);
5901 path = ext4_find_extent(inode, start, NULL, 0);
5902 if (IS_ERR(path))
5903 return PTR_ERR(path);
5904 ppath = path;
5905 ex = path[path->p_depth].p_ext;
5906 WARN_ON(le32_to_cpu(ex->ee_block) != start);
5907 if (ext4_ext_get_actual_len(ex) != len) {
5908 down_write(&EXT4_I(inode)->i_data_sem);
5909 ret = ext4_force_split_extent_at(NULL, inode, &ppath,
5910 start + len, 1);
5911 up_write(&EXT4_I(inode)->i_data_sem);
5912 if (ret)
5913 goto out;
5914 kfree(path);
5915 path = ext4_find_extent(inode, start, NULL, 0);
5916 if (IS_ERR(path))
5917 return PTR_ERR(path);
5918 ex = path[path->p_depth].p_ext;
5919 }
5920 }
5921 if (unwritten)
5922 ext4_ext_mark_unwritten(ex);
5923 else
5924 ext4_ext_mark_initialized(ex);
5925 ext4_ext_store_pblock(ex, pblk);
5926 down_write(&EXT4_I(inode)->i_data_sem);
5927 ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5928 up_write(&EXT4_I(inode)->i_data_sem);
5929 out:
5930 ext4_free_ext_path(path);
5931 ext4_mark_inode_dirty(NULL, inode);
5932 return ret;
5933 }
5934
5935 /* Try to shrink the extent tree */
5936 void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
5937 {
5938 struct ext4_ext_path *path = NULL;
5939 struct ext4_extent *ex;
5940 ext4_lblk_t old_cur, cur = 0;
5941
5942 while (cur < end) {
5943 path = ext4_find_extent(inode, cur, NULL, 0);
5944 if (IS_ERR(path))
5945 return;
5946 ex = path[path->p_depth].p_ext;
5947 if (!ex) {
5948 ext4_free_ext_path(path);
5949 ext4_mark_inode_dirty(NULL, inode);
5950 return;
5951 }
5952 old_cur = cur;
5953 cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5954 if (cur <= old_cur)
5955 cur = old_cur + 1;
5956 ext4_ext_try_to_merge(NULL, inode, path, ex);
5957 down_write(&EXT4_I(inode)->i_data_sem);
5958 ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5959 up_write(&EXT4_I(inode)->i_data_sem);
5960 ext4_mark_inode_dirty(NULL, inode);
5961 ext4_free_ext_path(path);
5962 }
5963 }
5964
5965 /* Check if *cur is a hole and if it is, skip it */
5966 static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
5967 {
5968 int ret;
5969 struct ext4_map_blocks map;
5970
5971 map.m_lblk = *cur;
5972 map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
5973
5974 ret = ext4_map_blocks(NULL, inode, &map, 0);
5975 if (ret < 0)
5976 return ret;
5977 if (ret != 0)
5978 return 0;
5979 *cur = *cur + map.m_len;
5980 return 0;
5981 }
5982
5983 /* Count number of blocks used by this inode and update i_blocks */
5984 int ext4_ext_replay_set_iblocks(struct inode *inode)
5985 {
5986 struct ext4_ext_path *path = NULL, *path2 = NULL;
5987 struct ext4_extent *ex;
5988 ext4_lblk_t cur = 0, end;
5989 int numblks = 0, i, ret = 0;
5990 ext4_fsblk_t cmp1, cmp2;
5991 struct ext4_map_blocks map;
5992
5993 /* Determine the size of the file first */
5994 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5995 EXT4_EX_NOCACHE);
5996 if (IS_ERR(path))
5997 return PTR_ERR(path);
5998 ex = path[path->p_depth].p_ext;
5999 if (!ex) {
6000 ext4_free_ext_path(path);
6001 goto out;
6002 }
6003 end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6004 ext4_free_ext_path(path);
6005
6006 /* Count the number of data blocks */
6007 cur = 0;
6008 while (cur < end) {
6009 map.m_lblk = cur;
6010 map.m_len = end - cur;
6011 ret = ext4_map_blocks(NULL, inode, &map, 0);
6012 if (ret < 0)
6013 break;
6014 if (ret > 0)
6015 numblks += ret;
6016 cur = cur + map.m_len;
6017 }
6018
6019 /*
6020 * Count the number of extent tree blocks. We do it by looking up
6021 * two successive extents and determining the difference between
6022 * their paths. When the paths of two successive extents differ,
6023 * we compare the blocks in the path at each level and increment
6024 * iblocks by the total number of differences found.
6025 */
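/*
 * Illustrative example: in a depth-1 tree, two successive extents in
 * the same leaf share path[1].p_bh, so no difference is counted; once
 * the lookup for the second extent lands in a different leaf block,
 * the buffer heads differ and numblks is incremented for that level.
 */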
6026 cur = 0;
6027 ret = skip_hole(inode, &cur);
6028 if (ret < 0)
6029 goto out;
6030 path = ext4_find_extent(inode, cur, NULL, 0);
6031 if (IS_ERR(path))
6032 goto out;
6033 numblks += path->p_depth;
6034 ext4_free_ext_path(path);
6035 while (cur < end) {
6036 path = ext4_find_extent(inode, cur, NULL, 0);
6037 if (IS_ERR(path))
6038 break;
6039 ex = path[path->p_depth].p_ext;
6040 if (!ex) {
6041 ext4_free_ext_path(path);
6042 return 0;
6043 }
6044 cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
6045 ext4_ext_get_actual_len(ex));
6046 ret = skip_hole(inode, &cur);
6047 if (ret < 0) {
6048 ext4_free_ext_path(path);
6049 break;
6050 }
6051 path2 = ext4_find_extent(inode, cur, NULL, 0);
6052 if (IS_ERR(path2)) {
6053 ext4_free_ext_path(path);
6054 break;
6055 }
6056 for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
6057 cmp1 = cmp2 = 0;
6058 if (i <= path->p_depth)
6059 cmp1 = path[i].p_bh ?
6060 path[i].p_bh->b_blocknr : 0;
6061 if (i <= path2->p_depth)
6062 cmp2 = path2[i].p_bh ?
6063 path2[i].p_bh->b_blocknr : 0;
6064 if (cmp1 != cmp2 && cmp2 != 0)
6065 numblks++;
6066 }
6067 ext4_free_ext_path(path);
6068 ext4_free_ext_path(path2);
6069 }
6070
6071 out:
6072 inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
6073 ext4_mark_inode_dirty(NULL, inode);
6074 return 0;
6075 }
6076
6077 int ext4_ext_clear_bb(struct inode *inode)
6078 {
6079 struct ext4_ext_path *path = NULL;
6080 struct ext4_extent *ex;
6081 ext4_lblk_t cur = 0, end;
6082 int j, ret = 0;
6083 struct ext4_map_blocks map;
6084
6085 if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
6086 return 0;
6087
6088 /* Determine the size of the file first */
6089 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6090 EXT4_EX_NOCACHE);
6091 if (IS_ERR(path))
6092 return PTR_ERR(path);
6093 ex = path[path->p_depth].p_ext;
6094 if (!ex) {
6095 ext4_free_ext_path(path);
6096 return 0;
6097 }
6098 end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6099 ext4_free_ext_path(path);
6100
6101 cur = 0;
6102 while (cur < end) {
6103 map.m_lblk = cur;
6104 map.m_len = end - cur;
6105 ret = ext4_map_blocks(NULL, inode, &map, 0);
6106 if (ret < 0)
6107 break;
6108 if (ret > 0) {
6109 path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6110 if (!IS_ERR_OR_NULL(path)) {
6111 for (j = 0; j < path->p_depth; j++) {
6112
6113 ext4_mb_mark_bb(inode->i_sb,
6114 path[j].p_block, 1, 0);
6115 ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6116 0, path[j].p_block, 1, 1);
6117 }
6118 ext4_free_ext_path(path);
6119 }
6120 ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
6121 ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6122 map.m_lblk, map.m_pblk, map.m_len, 1);
6123 }
6124 cur = cur + map.m_len;
6125 }
6126
6127 return 0;
6128 }
6129