// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../btrfs_inode.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../block-group.h"

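/*
 * Helper to empty an extent map tree between test cases: remove every extent
 * map left in the tree and drop the tree's reference on it.  With
 * CONFIG_BTRFS_DEBUG this also reports extent maps whose refcount is not the
 * single reference held by the tree, i.e. references leaked by a test case.
 */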
static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	write_lock(&em_tree->lock);
	while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
		node = rb_first_cached(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_err(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
	write_unlock(&em_tree->lock);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet. There is a file
 * extent [0, 16K) followed by another file extent [16K, 20K), and two dio
 * reads enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K) and
 * t2 is reading [0, 8K).
 *
 *     t1                            t2
 *  btrfs_get_extent()              btrfs_get_extent()
 *    -> lookup_extent_mapping()      -> lookup_extent_mapping()
 *    -> add_extent_mapping(0, 16K)
 *    -> return em
 *                                    -> add_extent_mapping(0, 16K)
 *                                    -> #handle -EEXIST
 */
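/*
 * Expected outcome (checked below): t2's btrfs_add_extent_mapping() hits
 * -EEXIST internally, drops the extent map it tried to insert and returns 0
 * with *em pointing at the already inserted [0, 16K) mapping.
 */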
static int test_case_1(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 16K)");
		goto out;
	}
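	/*
	 * add_extent_mapping() took its own reference on the em (this is the
	 * single reference free_extent_map_tree() expects to find), so drop
	 * the reference we got from alloc_extent_map().
	 */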
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [16K, 20K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
		goto out;
	}
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K)) {
		test_err(
"case1 [%llu %llu]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with -EEXIST, i.e. read an inline extent,
 * drop the page cache and read it again.
 */
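/*
 * Expected outcome (checked below): the second read's
 * btrfs_add_extent_mapping() returns 0 and hands back the existing inline
 * mapping [0, 1K) with block_start == EXTENT_MAP_INLINE, instead of
 * inserting a new one.
 */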
static int test_case_2(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 1K)");
		goto out;
	}
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case2 [0 1K]: ret %d", ret);
		goto out;
	}
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) {
		test_err(
"case2 [0 1K]: ret %d returned a wrong em (start %llu len %llu block_start %llu block_len %llu)",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

static int __test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case3 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
		goto out;
	}
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len)) {
		test_err(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K), and two jobs are running concurrently
 * against it: t1 is buffered writing to [4K, 8K) and t2 is doing a dio
 * read from [0, 4K), [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into the tree.
 *
 *         t1                       t2
 *  cow_file_range()	     btrfs_get_extent()
 *                            -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                            -> add_extent_mapping()
 */
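/*
 * Run the scenario for each of t2's possible read offsets; in every case
 * btrfs_add_extent_mapping() must hand back an em that covers the requested
 * [start, start + 4K) range despite the pre-existing [4K, 8K) mapping.
 */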
static int test_case_3(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_3(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, SZ_8K);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));

	return ret;
}

static int __test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * SZ_1K;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * SZ_1K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [8K, 32K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [0K, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case4 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (em && (start < em->start || start + len > extent_map_end(em))) {
		test_err(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K), and two jobs are running concurrently
 * against it: t1 is doing a dio write to [8K, 32K) and t2 is doing a dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) into em [0, 8K) and em [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()	       btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()              -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()            -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                          -> add_extent_mapping()
 *                                             # handle -EEXIST when adding
 *                                             # [0, 32K)
 */
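/*
 * Run the scenario for both of t2's read offsets (0 and 4K); in each case
 * btrfs_add_extent_mapping() must resolve the -EEXIST and return an em that
 * still covers the requested [start, start + 4K) range.
 */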
static int test_case_4(struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_4(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_4(fs_info, em_tree, SZ_4K);

	return ret;
}
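
/*
 * Insert an extent map [start, start + len) with EXTENT_FLAG_COMPRESSED set;
 * the flag keeps the extent map code from merging adjacent test extents, so
 * the tree layout the drop tests expect stays intact.
 */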

static int add_compressed_extent(struct extent_map_tree *em_tree,
				 u64 start, u64 len, u64 block_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	em->start = start;
	em->len = len;
	em->block_start = block_start;
	em->block_len = SZ_4K;
	set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	free_extent_map(em);
	if (ret < 0) {
		test_err("cannot add extent map [%llu, %llu)", start, start + len);
		return ret;
	}

	return 0;
}

struct extent_range {
	u64 start;
	u64 len;
};

/* The valid states of the tree after every drop, as described below. */
static const struct extent_range valid_ranges[][7] = {
	{
	  { .start = 0,			.len = SZ_8K },		/* [0, 8K) */
	  { .start = SZ_4K * 3,		.len = SZ_4K * 3},	/* [12k, 24k) */
	  { .start = SZ_4K * 6,		.len = SZ_4K * 3},	/* [24k, 36k) */
	  { .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
	  { .start = SZ_4K * 10,	.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
	  { .start = 0,			.len = SZ_8K },		/* [0, 8K) */
	  { .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
	  { .start = SZ_4K * 6,		.len = SZ_4K * 3},	/* [24k, 36k) */
	  { .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
	  { .start = SZ_4K * 10,	.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
	  { .start = 0,			.len = SZ_8K },		/* [0, 8K) */
	  { .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
	  { .start = SZ_4K * 6,		.len = SZ_4K},		/* [24k, 28k) */
	  { .start = SZ_32K,		.len = SZ_4K},		/* [32k, 36k) */
	  { .start = SZ_32K + SZ_4K,	.len = SZ_4K},		/* [36k, 40k) */
	  { .start = SZ_4K * 10,	.len = SZ_4K * 6},	/* [40k, 64k) */
	},
	{
	  { .start = 0,			.len = SZ_8K},		/* [0, 8K) */
	  { .start = SZ_4K * 5,		.len = SZ_4K},		/* [20k, 24k) */
	  { .start = SZ_4K * 6,		.len = SZ_4K},		/* [24k, 28k) */
	}
};

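/*
 * Walk the em tree in order and compare every entry's start/len against
 * valid_ranges[index]; fail if an entry differs, if entries are missing, or
 * if the tree holds more entries than expected.
 */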
static int validate_range(struct extent_map_tree *em_tree, int index)
{
	struct rb_node *n;
	int i;

	for (i = 0, n = rb_first_cached(&em_tree->map);
	     valid_ranges[index][i].len && n;
	     i++, n = rb_next(n)) {
		struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);

		if (entry->start != valid_ranges[index][i].start) {
			test_err("mapping has start %llu expected %llu",
				 entry->start, valid_ranges[index][i].start);
			return -EINVAL;
		}

		if (entry->len != valid_ranges[index][i].len) {
			test_err("mapping has len %llu expected %llu",
				 entry->len, valid_ranges[index][i].len);
			return -EINVAL;
		}
	}

	/*
	 * We exited because we don't have any more entries in the extent_map
	 * but we still expect more valid entries.
	 */
	if (valid_ranges[index][i].len) {
		test_err("missing an entry");
		return -EINVAL;
	}

	/* We exited the loop but still have entries in the extent map. */
	if (n) {
		test_err("we have a left over entry in the extent map we didn't expect");
		return -EINVAL;
	}

	return 0;
}

/*
 * Test scenario:
 *
 * Test the various edge cases of btrfs_drop_extent_map_range(). Create the
 * following ranges:
 *
 * [0, 12k) [12k, 24k) [24k, 36k) [36k, 40k) [40k, 64k)
 *
 * And then drop:
 *
 * [8k, 12k) - test the single front split
 * [12k, 20k) - test the single back split
 * [28k, 32k) - test the double split
 * [32k, 64k) - test whole em dropping
 *
 * The ems have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from
 * merging them.
 */
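/*
 * After each drop the tree is compared against the matching row of
 * valid_ranges[] above.
 */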
static int test_case_5(void)
{
	struct extent_map_tree *em_tree;
	struct inode *inode;
	u64 start, end;
	int ret;

	test_msg("Running btrfs_drop_extent_map_range tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* [0, 12k) */
	ret = add_compressed_extent(em_tree, 0, SZ_4K * 3, 0);
	if (ret) {
		test_err("cannot add extent range [0, 12K)");
		goto out;
	}

	/* [12k, 24k) */
	ret = add_compressed_extent(em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K);
	if (ret) {
		test_err("cannot add extent range [12k, 24k)");
		goto out;
	}

	/* [24k, 36k) */
	ret = add_compressed_extent(em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K);
	if (ret) {
		test_err("cannot add extent range [24k, 36k)");
		goto out;
	}

	/* [36k, 40k) */
	ret = add_compressed_extent(em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
	if (ret) {
		test_err("cannot add extent range [36k, 40k)");
		goto out;
	}

	/* [40k, 64k) */
	ret = add_compressed_extent(em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K);
	if (ret) {
		test_err("cannot add extent range [40k, 64k)");
		goto out;
	}

	/* Drop [8k, 12k) */
	start = SZ_8K;
	end = (3 * SZ_4K) - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 0);
	if (ret)
		goto out;

	/* Drop [12k, 20k) */
	start = SZ_4K * 3;
	end = SZ_16K + SZ_4K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 1);
	if (ret)
		goto out;

	/* Drop [28k, 32k) */
	start = SZ_32K - SZ_4K;
	end = SZ_32K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 2);
	if (ret)
		goto out;

	/* Drop [32k, 64k) */
	start = SZ_32K;
	end = SZ_64K - 1;
	btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
	ret = validate_range(&BTRFS_I(inode)->extent_tree, 3);
	if (ret)
		goto out;
out:
	iput(inode);
	return ret;
}

/*
 * Test the btrfs_add_extent_mapping helper which will attempt to create an em
 * for areas between two existing ems.  Validate it doesn't do this when there
 * are two unmerged ems side by side.
 */
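/*
 * Expected outcome (checked below): the existing [0, 4K) mapping is returned
 * (start 0, len 4K) rather than a new em spanning the requested [0, 8K).
 */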
static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree)
{
	struct extent_map *em = NULL;
	int ret;

	ret = add_compressed_extent(em_tree, 0, SZ_4K, 0);
	if (ret)
		goto out;

	ret = add_compressed_extent(em_tree, SZ_4K, SZ_4K, 0);
	if (ret)
		goto out;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_16K;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K);
	write_unlock(&em_tree->lock);

	if (ret != 0) {
		test_err("got an error when adding our em: %d", ret);
		goto out;
	}

	ret = -EINVAL;
	if (em->start != 0) {
		test_err("unexpected em->start at %llu, wanted 0", em->start);
		goto out;
	}
	if (em->len != SZ_4K) {
		test_err("unexpected em->len %llu, expected 4K", em->len);
		goto out;
	}
	ret = 0;
out:
	free_extent_map(em);
	free_extent_map_tree(em_tree);
	return ret;
}

/*
 * Regression test for btrfs_drop_extent_map_range.  Calling with skip_pinned ==
 * true would mess up the start/end calculations and subsequent splits would be
 * incorrect.
 */
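/*
 * Layout checked below after dropping [0, 36K) with skip_pinned == true: the
 * pinned em [0, 16K) must survive untouched, there must be no em in
 * [16K, 32K), and the unpinned em [32K, 48K) must have been split so that
 * only [36K, 48K) remains.
 */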
static int test_case_7(void)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct inode *inode;
	int ret;

	test_msg("Running btrfs_drop_extent_map_range with pinned");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}

	em_tree = &BTRFS_I(inode)->extent_tree;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* [0, 16K), pinned */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_4K;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("couldn't add extent map");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* [32K, 48K), not pinned */
	em->start = SZ_32K;
	em->len = SZ_16K;
	em->block_start = SZ_32K;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("couldn't add extent map");
		goto out;
	}
	free_extent_map(em);

	/*
	 * Drop [0, 36K). This should skip the pinned [0, 16K) extent and then
	 * split the [32K, 48K) extent.
	 */
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true);

	/* Make sure our extent maps look sane. */
	ret = -EINVAL;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, 0, SZ_16K);
	read_unlock(&em_tree->lock);
	if (!em) {
		test_err("didn't find an em at 0 as expected");
		goto out;
	}

	if (em->start != 0) {
		test_err("em->start is %llu, expected 0", em->start);
		goto out;
	}

	if (em->len != SZ_16K) {
		test_err("em->len is %llu, expected 16K", em->len);
		goto out;
	}

	free_extent_map(em);

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
	read_unlock(&em_tree->lock);
	if (em) {
		test_err("found an em when we weren't expecting one");
		goto out;
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
	read_unlock(&em_tree->lock);
	if (!em) {
		test_err("didn't find an em at 32K as expected");
		goto out;
	}

	if (em->start != (36 * SZ_1K)) {
		test_err("em->start is %llu, expected 36K", em->start);
		goto out;
	}

	if (em->len != (12 * SZ_1K)) {
		test_err("em->len is %llu, expected 12K", em->len);
		goto out;
	}

	free_extent_map(em);

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
	read_unlock(&em_tree->lock);
	if (em) {
		test_err("found an unexpected em above 48K");
		goto out;
	}

	ret = 0;
out:
	free_extent_map(em);
	iput(inode);
	return ret;
}

struct rmap_test_vector {
	u64 raid_type;
	u64 physical_start;
	u64 data_stripe_size;
	u64 num_data_stripes;
	u64 num_stripes;
	/* Assume we won't have more than 5 physical stripes */
	u64 data_stripe_phys_start[5];
	bool expected_mapped_addr;
	/* Physical to logical addresses */
	u64 mapped_logical[5];
};

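/*
 * Build a dummy chunk mapping described by @test, add it to the fs_info
 * mapping tree and check that btrfs_rmap_block() maps the physical offset of
 * superblock copy 1 (btrfs_sb_offset(1)) back to the expected logical
 * addresses.
 */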
static int test_rmap_block(struct btrfs_fs_info *fs_info,
			   struct rmap_test_vector *test)
{
	struct extent_map *em;
	struct map_lookup *map = NULL;
	u64 *logical = NULL;
	int i, out_ndaddrs, out_stripe_len;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL);
	if (!map) {
		free_extent_map(em);
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	/* Start at 4GiB logical address */
	em->start = SZ_4G;
	em->len = test->data_stripe_size * test->num_data_stripes;
	em->block_len = em->len;
	em->orig_block_len = test->data_stripe_size;
	em->map_lookup = map;

	map->num_stripes = test->num_stripes;
	map->type = test->raid_type;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info);

		if (IS_ERR(dev)) {
			test_err("cannot allocate device");
			ret = PTR_ERR(dev);
			goto out;
		}
		map->stripes[i].dev = dev;
		map->stripes[i].physical = test->data_stripe_phys_start[i];
	}

	write_lock(&fs_info->mapping_tree.lock);
	ret = add_extent_mapping(&fs_info->mapping_tree, em, 0);
	write_unlock(&fs_info->mapping_tree.lock);
	if (ret) {
		test_err("error adding block group mapping to mapping tree");
		goto out_free;
	}

	ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1),
			       &logical, &out_ndaddrs, &out_stripe_len);
	if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
		test_err("didn't rmap anything but expected %d",
			 test->expected_mapped_addr);
		if (!ret)
			ret = -EINVAL;
		goto out;
	}

	if (out_stripe_len != BTRFS_STRIPE_LEN) {
		test_err("calculated stripe length doesn't match");
		ret = -EINVAL;
		goto out;
	}

	if (out_ndaddrs != test->expected_mapped_addr) {
		for (i = 0; i < out_ndaddrs; i++)
			test_msg("mapped %llu", logical[i]);
		test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < out_ndaddrs; i++) {
		if (logical[i] != test->mapped_logical[i]) {
			test_err("unexpected logical address mapped");
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;
out:
	write_lock(&fs_info->mapping_tree.lock);
	remove_extent_mapping(&fs_info->mapping_tree, em);
	write_unlock(&fs_info->mapping_tree.lock);
	/* For us */
	free_extent_map(em);
out_free:
	/* For the tree */
	free_extent_map(em);
	kfree(logical);
	return ret;
}

int btrfs_test_extent_map(void)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct extent_map_tree *em_tree;
	int ret = 0, i;
	struct rmap_test_vector rmap_tests[] = {
		{
			/*
			 * Test that a chunk with 2 data stripes, one of which
			 * intersects the physical address of the super block,
			 * is correctly recognised.
			 */
			.raid_type = BTRFS_BLOCK_GROUP_RAID1,
			.physical_start = SZ_64M - SZ_4M,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 2,
			.num_stripes = 2,
			.data_stripe_phys_start =
				{SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
			.expected_mapped_addr = true,
			.mapped_logical = {SZ_4G + SZ_4M}
		},
		{
			/*
			 * Test that out-of-range physical addresses are
			 * ignored.
			 */

			 /* SINGLE chunk type */
			.raid_type = 0,
			.physical_start = SZ_4G,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 1,
			.num_stripes = 1,
			.data_stripe_phys_start = {SZ_256M},
			.expected_mapped_addr = false,
			.mapped_logical = {0}
		}
	};

	test_msg("running extent_map tests");

	/*
	 * Note: the fs_info is not set up completely, we only need
	 * fs_info::fsid for the tracepoint.
	 */
	fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree) {
		ret = -ENOMEM;
		goto out;
	}

	extent_map_tree_init(em_tree);

	ret = test_case_1(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_2(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_3(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_4(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_5();
	if (ret)
		goto out;
	ret = test_case_6(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_7();
	if (ret)
		goto out;

	test_msg("running rmap tests");
	for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
		ret = test_rmap_block(fs_info, &rmap_tests[i]);
		if (ret)
			goto out;
	}

out:
	kfree(em_tree);
	btrfs_free_dummy_fs_info(fs_info);

	return ret;
}