1 /*
2  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_inode_item.h"
34 #include "xfs_alloc.h"
35 #include "xfs_btree.h"
36 #include "xfs_btree_trace.h"
37 #include "xfs_itable.h"
38 #include "xfs_bmap.h"
39 #include "xfs_error.h"
40 #include "xfs_quota.h"
41 
42 /*
43  * Determine the extent state.
44  */
45 /* ARGSUSED */
46 STATIC xfs_exntst_t
xfs_extent_state(xfs_filblks_t blks,int extent_flag)47 xfs_extent_state(
48 	xfs_filblks_t		blks,
49 	int			extent_flag)
50 {
51 	if (extent_flag) {
52 		ASSERT(blks != 0);	/* saved for DMIG */
53 		return XFS_EXT_UNWRITTEN;
54 	}
55 	return XFS_EXT_NORM;
56 }
57 
58 /*
59  * Convert on-disk form of btree root to in-memory form.
60  */
61 void
xfs_bmdr_to_bmbt(struct xfs_mount * mp,xfs_bmdr_block_t * dblock,int dblocklen,struct xfs_btree_block * rblock,int rblocklen)62 xfs_bmdr_to_bmbt(
63 	struct xfs_mount	*mp,
64 	xfs_bmdr_block_t	*dblock,
65 	int			dblocklen,
66 	struct xfs_btree_block	*rblock,
67 	int			rblocklen)
68 {
69 	int			dmxr;
70 	xfs_bmbt_key_t		*fkp;
71 	__be64			*fpp;
72 	xfs_bmbt_key_t		*tkp;
73 	__be64			*tpp;
74 
75 	rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
76 	rblock->bb_level = dblock->bb_level;
77 	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
78 	rblock->bb_numrecs = dblock->bb_numrecs;
79 	rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
80 	rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
81 	dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
82 	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
83 	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
84 	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
85 	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
86 	dmxr = be16_to_cpu(dblock->bb_numrecs);
87 	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
88 	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
89 }
90 
/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 */
STATIC void
__xfs_bmbt_get_all(
		__uint64_t l0,
		__uint64_t l1,
		xfs_bmbt_irec_t *s)
{
	int	ext_flag;
	xfs_exntst_t st;

	/* Top bit of l0 is the unwritten-extent flag. */
	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
	/* The next bits of l0 (below the flag, above the low 9) are startoff. */
	s->br_startoff = ((xfs_fileoff_t)l0 &
			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
#if XFS_BIG_BLKNOS
	/* Startblock straddles both words: low 9 bits of l0, high 43 of l1. */
	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
			   (((xfs_fsblock_t)l1) >> 21);
#else
#ifdef DEBUG
	{
		xfs_dfsbno_t	b;

		b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
		    (((xfs_dfsbno_t)l1) >> 21);
		/*
		 * On small-blkno configs the value must fit in 32 bits
		 * unless it is a null (delayed allocation) startblock.
		 */
		ASSERT((b >> 32) == 0 || isnulldstartblock(b));
		s->br_startblock = (xfs_fsblock_t)b;
	}
#else	/* !DEBUG */
	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
#endif	/* DEBUG */
#endif	/* XFS_BIG_BLKNOS */
	/* Low 21 bits of l1 are the block count. */
	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
	/* This is xfs_extent_state() in-line */
	if (ext_flag) {
		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
		st = XFS_EXT_UNWRITTEN;
	} else
		st = XFS_EXT_NORM;
	s->br_state = st;
}
134 
/*
 * Expand an in-memory (host-endian) extent record into uncompressed form.
 */
void
xfs_bmbt_get_all(
	xfs_bmbt_rec_host_t *r,
	xfs_bmbt_irec_t *s)
{
	__xfs_bmbt_get_all(r->l0, r->l1, s);
}
142 
143 /*
144  * Extract the blockcount field from an in memory bmap extent record.
145  */
146 xfs_filblks_t
xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t * r)147 xfs_bmbt_get_blockcount(
148 	xfs_bmbt_rec_host_t	*r)
149 {
150 	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
151 }
152 
/*
 * Extract the startblock field from an in memory bmap extent record.
 * The value straddles both words: low 9 bits of l0 are the high bits,
 * the high 43 bits of l1 are the low bits.
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
	xfs_bmbt_rec_host_t	*r)
{
#if XFS_BIG_BLKNOS
	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
	       (((xfs_fsblock_t)r->l1) >> 21);
#else
#ifdef DEBUG
	xfs_dfsbno_t	b;

	b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
	    (((xfs_dfsbno_t)r->l1) >> 21);
	/*
	 * On small-blkno configs the value must fit in 32 bits unless it
	 * is a null (delayed allocation) startblock.
	 */
	ASSERT((b >> 32) == 0 || isnulldstartblock(b));
	return (xfs_fsblock_t)b;
#else	/* !DEBUG */
	return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
#endif	/* DEBUG */
#endif	/* XFS_BIG_BLKNOS */
}
176 
177 /*
178  * Extract the startoff field from an in memory bmap extent record.
179  */
180 xfs_fileoff_t
xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t * r)181 xfs_bmbt_get_startoff(
182 	xfs_bmbt_rec_host_t	*r)
183 {
184 	return ((xfs_fileoff_t)r->l0 &
185 		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
186 }
187 
188 xfs_exntst_t
xfs_bmbt_get_state(xfs_bmbt_rec_host_t * r)189 xfs_bmbt_get_state(
190 	xfs_bmbt_rec_host_t	*r)
191 {
192 	int	ext_flag;
193 
194 	ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
195 	return xfs_extent_state(xfs_bmbt_get_blockcount(r),
196 				ext_flag);
197 }
198 
199 /*
200  * Extract the blockcount field from an on disk bmap extent record.
201  */
202 xfs_filblks_t
xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t * r)203 xfs_bmbt_disk_get_blockcount(
204 	xfs_bmbt_rec_t	*r)
205 {
206 	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
207 }
208 
209 /*
210  * Extract the startoff field from a disk format bmap extent record.
211  */
212 xfs_fileoff_t
xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t * r)213 xfs_bmbt_disk_get_startoff(
214 	xfs_bmbt_rec_t	*r)
215 {
216 	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
217 		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
218 }
219 
220 
/*
 * Set all the fields in a bmap extent record from the arguments.
 *
 * Packed layout: l0 = flag(1) | startoff(54) | startblock-high(9),
 *                l1 = startblock-low(43) | blockcount(21).
 */
void
xfs_bmbt_set_allf(
	xfs_bmbt_rec_host_t	*r,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	xfs_exntst_t		state)
{
	int		extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);

#if XFS_BIG_BLKNOS
	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)startoff << 9) |
		((xfs_bmbt_rec_base_t)startblock >> 43);
	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
		((xfs_bmbt_rec_base_t)blockcount &
		(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
#else	/* !XFS_BIG_BLKNOS */
	if (isnullstartblock(startblock)) {
		/*
		 * Null (delayed allocation) startblock: store all-ones in
		 * the startblock bit positions of both words.
		 */
		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
			((xfs_bmbt_rec_base_t)startoff << 9) |
			 (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
		r->l1 = xfs_mask64hi(11) |
			  ((xfs_bmbt_rec_base_t)startblock << 21) |
			  ((xfs_bmbt_rec_base_t)blockcount &
			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
	} else {
		/* Real 32-bit startblock: high 9 bits of l0 stay zero. */
		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
			((xfs_bmbt_rec_base_t)startoff << 9);
		r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
			 ((xfs_bmbt_rec_base_t)blockcount &
			 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
	}
#endif	/* XFS_BIG_BLKNOS */
}
265 
/*
 * Set all the fields in an in-memory bmap extent record from the
 * uncompressed form.
 */
void
xfs_bmbt_set_all(
	xfs_bmbt_rec_host_t *r,
	xfs_bmbt_irec_t	*s)
{
	xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
			     s->br_blockcount, s->br_state);
}
277 
278 
/*
 * Set all the fields in a disk format bmap extent record from the arguments.
 * Same packing as xfs_bmbt_set_allf, but both words are stored big-endian.
 */
void
xfs_bmbt_disk_set_allf(
	xfs_bmbt_rec_t		*r,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	xfs_exntst_t		state)
{
	int			extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);

#if XFS_BIG_BLKNOS
	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

	r->l0 = cpu_to_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)startoff << 9) |
		 ((xfs_bmbt_rec_base_t)startblock >> 43));
	r->l1 = cpu_to_be64(
		((xfs_bmbt_rec_base_t)startblock << 21) |
		 ((xfs_bmbt_rec_base_t)blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
#else	/* !XFS_BIG_BLKNOS */
	if (isnullstartblock(startblock)) {
		/* Null (delayed alloc) startblock: all-ones in its bits. */
		r->l0 = cpu_to_be64(
			((xfs_bmbt_rec_base_t)extent_flag << 63) |
			 ((xfs_bmbt_rec_base_t)startoff << 9) |
			  (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
		r->l1 = cpu_to_be64(xfs_mask64hi(11) |
			  ((xfs_bmbt_rec_base_t)startblock << 21) |
			  ((xfs_bmbt_rec_base_t)blockcount &
			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
	} else {
		/* Real 32-bit startblock: high startblock bits stay zero. */
		r->l0 = cpu_to_be64(
			((xfs_bmbt_rec_base_t)extent_flag << 63) |
			 ((xfs_bmbt_rec_base_t)startoff << 9));
		r->l1 = cpu_to_be64(
			((xfs_bmbt_rec_base_t)startblock << 21) |
			 ((xfs_bmbt_rec_base_t)blockcount &
			  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
	}
#endif	/* XFS_BIG_BLKNOS */
}
328 
/*
 * Set all the fields in a disk format bmap extent record from the
 * uncompressed form.
 */
STATIC void
xfs_bmbt_disk_set_all(
	xfs_bmbt_rec_t	*r,
	xfs_bmbt_irec_t *s)
{
	xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
				  s->br_blockcount, s->br_state);
}
340 
341 /*
342  * Set the blockcount field in a bmap extent record.
343  */
344 void
xfs_bmbt_set_blockcount(xfs_bmbt_rec_host_t * r,xfs_filblks_t v)345 xfs_bmbt_set_blockcount(
346 	xfs_bmbt_rec_host_t *r,
347 	xfs_filblks_t	v)
348 {
349 	ASSERT((v & xfs_mask64hi(43)) == 0);
350 	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
351 		  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
352 }
353 
/*
 * Set the startblock field in a bmap extent record.
 * The value straddles both words: its high 9 bits go in the low bits of
 * l0, its low 43 bits in the high bits of l1.
 */
void
xfs_bmbt_set_startblock(
	xfs_bmbt_rec_host_t *r,
	xfs_fsblock_t	v)
{
#if XFS_BIG_BLKNOS
	ASSERT((v & xfs_mask64hi(12)) == 0);
	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
		  (xfs_bmbt_rec_base_t)(v >> 43);
	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
		  (xfs_bmbt_rec_base_t)(v << 21);
#else	/* !XFS_BIG_BLKNOS */
	if (isnullstartblock(v)) {
		/* Null (delayed alloc) startblock: all-ones in its bits. */
		r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
		r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
			  ((xfs_bmbt_rec_base_t)v << 21) |
			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
	} else {
		/* Real 32-bit startblock: clear the high bits in l0. */
		r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
		r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
	}
#endif	/* XFS_BIG_BLKNOS */
}
381 
382 /*
383  * Set the startoff field in a bmap extent record.
384  */
385 void
xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t * r,xfs_fileoff_t v)386 xfs_bmbt_set_startoff(
387 	xfs_bmbt_rec_host_t *r,
388 	xfs_fileoff_t	v)
389 {
390 	ASSERT((v & xfs_mask64hi(9)) == 0);
391 	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
392 		((xfs_bmbt_rec_base_t)v << 9) |
393 		  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
394 }
395 
396 /*
397  * Set the extent state field in a bmap extent record.
398  */
399 void
xfs_bmbt_set_state(xfs_bmbt_rec_host_t * r,xfs_exntst_t v)400 xfs_bmbt_set_state(
401 	xfs_bmbt_rec_host_t *r,
402 	xfs_exntst_t	v)
403 {
404 	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
405 	if (v == XFS_EXT_NORM)
406 		r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
407 	else
408 		r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
409 }
410 
411 /*
412  * Convert in-memory form of btree root to on-disk form.
413  */
414 void
xfs_bmbt_to_bmdr(struct xfs_mount * mp,struct xfs_btree_block * rblock,int rblocklen,xfs_bmdr_block_t * dblock,int dblocklen)415 xfs_bmbt_to_bmdr(
416 	struct xfs_mount	*mp,
417 	struct xfs_btree_block	*rblock,
418 	int			rblocklen,
419 	xfs_bmdr_block_t	*dblock,
420 	int			dblocklen)
421 {
422 	int			dmxr;
423 	xfs_bmbt_key_t		*fkp;
424 	__be64			*fpp;
425 	xfs_bmbt_key_t		*tkp;
426 	__be64			*tpp;
427 
428 	ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
429 	ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
430 	ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
431 	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
432 	dblock->bb_level = rblock->bb_level;
433 	dblock->bb_numrecs = rblock->bb_numrecs;
434 	dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
435 	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
436 	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
437 	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
438 	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
439 	dmxr = be16_to_cpu(dblock->bb_numrecs);
440 	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
441 	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
442 }
443 
444 /*
445  * Check extent records, which have just been read, for
446  * any bit in the extent flag field. ASSERT on debug
447  * kernels, as this condition should not occur.
448  * Return an error condition (1) if any flags found,
449  * otherwise return 0.
450  */
451 
452 int
xfs_check_nostate_extents(xfs_ifork_t * ifp,xfs_extnum_t idx,xfs_extnum_t num)453 xfs_check_nostate_extents(
454 	xfs_ifork_t		*ifp,
455 	xfs_extnum_t		idx,
456 	xfs_extnum_t		num)
457 {
458 	for (; num > 0; num--, idx++) {
459 		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
460 		if ((ep->l0 >>
461 		     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
462 			ASSERT(0);
463 			return 1;
464 		}
465 	}
466 	return 0;
467 }
468 
469 
470 STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(struct xfs_btree_cur * cur)471 xfs_bmbt_dup_cursor(
472 	struct xfs_btree_cur	*cur)
473 {
474 	struct xfs_btree_cur	*new;
475 
476 	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
477 			cur->bc_private.b.ip, cur->bc_private.b.whichfork);
478 
479 	/*
480 	 * Copy the firstblock, flist, and flags values,
481 	 * since init cursor doesn't get them.
482 	 */
483 	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
484 	new->bc_private.b.flist = cur->bc_private.b.flist;
485 	new->bc_private.b.flags = cur->bc_private.b.flags;
486 
487 	return new;
488 }
489 
490 STATIC void
xfs_bmbt_update_cursor(struct xfs_btree_cur * src,struct xfs_btree_cur * dst)491 xfs_bmbt_update_cursor(
492 	struct xfs_btree_cur	*src,
493 	struct xfs_btree_cur	*dst)
494 {
495 	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
496 	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
497 	ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
498 
499 	dst->bc_private.b.allocated += src->bc_private.b.allocated;
500 	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
501 
502 	src->bc_private.b.allocated = 0;
503 }
504 
/*
 * Allocate one filesystem block for a new bmap btree block.
 * On success *stat is 1 and *new holds the block number; *stat is 0
 * when no space could be found. Returns 0 or an error code.
 */
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;

	if (args.fsbno == NULLFSBLOCK) {
		/* First allocation in this transaction: start near *start. */
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = xfs_trans_get_block_res(args.tp);
	} else if (cur->bc_private.b.flist->xbf_low) {
		/* Low-space mode: take the first block we can find. */
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
		/* No block reservation left to back this allocation. */
		error = XFS_ERROR(ENOSPC);
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again without minleft and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.minleft = 0;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.flist->xbf_low = 1;
	}
	if (args.fsbno == NULLFSBLOCK) {
		/* Genuinely out of space: report "no block" to the caller. */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	/* Record the new block against the cursor, inode and quota. */
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

 error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
590 
591 STATIC int
xfs_bmbt_free_block(struct xfs_btree_cur * cur,struct xfs_buf * bp)592 xfs_bmbt_free_block(
593 	struct xfs_btree_cur	*cur,
594 	struct xfs_buf		*bp)
595 {
596 	struct xfs_mount	*mp = cur->bc_mp;
597 	struct xfs_inode	*ip = cur->bc_private.b.ip;
598 	struct xfs_trans	*tp = cur->bc_tp;
599 	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
600 
601 	xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
602 	ip->i_d.di_nblocks--;
603 
604 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
605 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
606 	xfs_trans_binval(tp, bp);
607 	return 0;
608 }
609 
610 STATIC int
xfs_bmbt_get_minrecs(struct xfs_btree_cur * cur,int level)611 xfs_bmbt_get_minrecs(
612 	struct xfs_btree_cur	*cur,
613 	int			level)
614 {
615 	if (level == cur->bc_nlevels - 1) {
616 		struct xfs_ifork	*ifp;
617 
618 		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
619 				    cur->bc_private.b.whichfork);
620 
621 		return xfs_bmbt_maxrecs(cur->bc_mp,
622 					ifp->if_broot_bytes, level == 0) / 2;
623 	}
624 
625 	return cur->bc_mp->m_bmap_dmnr[level != 0];
626 }
627 
628 int
xfs_bmbt_get_maxrecs(struct xfs_btree_cur * cur,int level)629 xfs_bmbt_get_maxrecs(
630 	struct xfs_btree_cur	*cur,
631 	int			level)
632 {
633 	if (level == cur->bc_nlevels - 1) {
634 		struct xfs_ifork	*ifp;
635 
636 		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
637 				    cur->bc_private.b.whichfork);
638 
639 		return xfs_bmbt_maxrecs(cur->bc_mp,
640 					ifp->if_broot_bytes, level == 0);
641 	}
642 
643 	return cur->bc_mp->m_bmap_dmxr[level != 0];
644 
645 }
646 
647 /*
648  * Get the maximum records we could store in the on-disk format.
649  *
650  * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
651  * for the root node this checks the available space in the dinode fork
652  * so that we can resize the in-memory buffer to match it.  After a
653  * resize to the maximum size this function returns the same value
654  * as xfs_bmbt_get_maxrecs for the root node, too.
655  */
656 STATIC int
xfs_bmbt_get_dmaxrecs(struct xfs_btree_cur * cur,int level)657 xfs_bmbt_get_dmaxrecs(
658 	struct xfs_btree_cur	*cur,
659 	int			level)
660 {
661 	if (level != cur->bc_nlevels - 1)
662 		return cur->bc_mp->m_bmap_dmxr[level != 0];
663 	return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
664 				level == 0);
665 }
666 
667 STATIC void
xfs_bmbt_init_key_from_rec(union xfs_btree_key * key,union xfs_btree_rec * rec)668 xfs_bmbt_init_key_from_rec(
669 	union xfs_btree_key	*key,
670 	union xfs_btree_rec	*rec)
671 {
672 	key->bmbt.br_startoff =
673 		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
674 }
675 
676 STATIC void
xfs_bmbt_init_rec_from_key(union xfs_btree_key * key,union xfs_btree_rec * rec)677 xfs_bmbt_init_rec_from_key(
678 	union xfs_btree_key	*key,
679 	union xfs_btree_rec	*rec)
680 {
681 	ASSERT(key->bmbt.br_startoff != 0);
682 
683 	xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
684 			       0, 0, XFS_EXT_NORM);
685 }
686 
/*
 * Build a disk-format record from the cursor's current in-core record.
 */
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
694 
/*
 * The bmbt root is in the inode, so there is no parent pointer to seed
 * a lookup with: always start from a zero pointer.
 */
STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}
702 
703 STATIC __int64_t
xfs_bmbt_key_diff(struct xfs_btree_cur * cur,union xfs_btree_key * key)704 xfs_bmbt_key_diff(
705 	struct xfs_btree_cur	*cur,
706 	union xfs_btree_key	*key)
707 {
708 	return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
709 				      cur->bc_rec.b.br_startoff;
710 }
711 
712 #ifdef DEBUG
713 STATIC int
xfs_bmbt_keys_inorder(struct xfs_btree_cur * cur,union xfs_btree_key * k1,union xfs_btree_key * k2)714 xfs_bmbt_keys_inorder(
715 	struct xfs_btree_cur	*cur,
716 	union xfs_btree_key	*k1,
717 	union xfs_btree_key	*k2)
718 {
719 	return be64_to_cpu(k1->bmbt.br_startoff) <
720 		be64_to_cpu(k2->bmbt.br_startoff);
721 }
722 
723 STATIC int
xfs_bmbt_recs_inorder(struct xfs_btree_cur * cur,union xfs_btree_rec * r1,union xfs_btree_rec * r2)724 xfs_bmbt_recs_inorder(
725 	struct xfs_btree_cur	*cur,
726 	union xfs_btree_rec	*r1,
727 	union xfs_btree_rec	*r2)
728 {
729 	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
730 		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
731 		xfs_bmbt_disk_get_startoff(&r2->bmbt);
732 }
733 #endif	/* DEBUG */
734 
735 #ifdef XFS_BTREE_TRACE
736 ktrace_t	*xfs_bmbt_trace_buf;
737 
/*
 * Record a bmbt trace event in the global trace buffer.  The first slot
 * packs the event type, fork and source line; the remaining slots carry
 * the caller-supplied arguments.
 */
STATIC void
xfs_bmbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	int			whichfork = cur->bc_private.b.whichfork;

	ktrace_enter(xfs_bmbt_trace_buf,
		(void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
		(void *)func, (void *)s, (void *)ip, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}
767 
768 STATIC void
xfs_bmbt_trace_cursor(struct xfs_btree_cur * cur,__uint32_t * s0,__uint64_t * l0,__uint64_t * l1)769 xfs_bmbt_trace_cursor(
770 	struct xfs_btree_cur	*cur,
771 	__uint32_t		*s0,
772 	__uint64_t		*l0,
773 	__uint64_t		*l1)
774 {
775 	struct xfs_bmbt_rec_host r;
776 
777 	xfs_bmbt_set_all(&r, &cur->bc_rec.b);
778 
779 	*s0 = (cur->bc_nlevels << 24) |
780 	      (cur->bc_private.b.flags << 16) |
781 	       cur->bc_private.b.allocated;
782 	*l0 = r.l0;
783 	*l1 = r.l1;
784 }
785 
/*
 * Report a bmbt key for tracing; only the startoff is meaningful.
 */
STATIC void
xfs_bmbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be64_to_cpu(key->bmbt.br_startoff);
	*l1 = 0;
}
796 
797 /* Endian flipping versions of the bmbt extraction functions */
798 STATIC void
xfs_bmbt_disk_get_all(xfs_bmbt_rec_t * r,xfs_bmbt_irec_t * s)799 xfs_bmbt_disk_get_all(
800 	xfs_bmbt_rec_t	*r,
801 	xfs_bmbt_irec_t *s)
802 {
803 	__xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
804 				get_unaligned_be64(&r->l1), s);
805 }
806 
/*
 * Report a bmbt record for tracing as its three main decoded fields.
 */
STATIC void
xfs_bmbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	struct xfs_bmbt_irec	irec;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	*l0 = irec.br_startoff;
	*l1 = irec.br_startblock;
	*l2 = irec.br_blockcount;
}
822 #endif /* XFS_BTREE_TRACE */
823 
/* Operations vector wiring the bmap btree into the generic btree code. */
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_rec_from_key	= xfs_bmbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,

#ifdef DEBUG
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
#endif

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_bmbt_trace_enter,
	.trace_cursor		= xfs_bmbt_trace_cursor,
	.trace_key		= xfs_bmbt_trace_key,
	.trace_record		= xfs_bmbt_trace_record,
#endif
};
853 
854 /*
855  * Allocate a new bmap btree cursor.
856  */
857 struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(struct xfs_mount * mp,struct xfs_trans * tp,struct xfs_inode * ip,int whichfork)858 xfs_bmbt_init_cursor(
859 	struct xfs_mount	*mp,		/* file system mount point */
860 	struct xfs_trans	*tp,		/* transaction pointer */
861 	struct xfs_inode	*ip,		/* inode owning the btree */
862 	int			whichfork)	/* data or attr fork */
863 {
864 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
865 	struct xfs_btree_cur	*cur;
866 
867 	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
868 
869 	cur->bc_tp = tp;
870 	cur->bc_mp = mp;
871 	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
872 	cur->bc_btnum = XFS_BTNUM_BMAP;
873 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
874 
875 	cur->bc_ops = &xfs_bmbt_ops;
876 	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
877 
878 	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
879 	cur->bc_private.b.ip = ip;
880 	cur->bc_private.b.firstblock = NULLFSBLOCK;
881 	cur->bc_private.b.flist = NULL;
882 	cur->bc_private.b.allocated = 0;
883 	cur->bc_private.b.flags = 0;
884 	cur->bc_private.b.whichfork = whichfork;
885 
886 	return cur;
887 }
888 
889 /*
890  * Calculate number of records in a bmap btree block.
891  */
892 int
xfs_bmbt_maxrecs(struct xfs_mount * mp,int blocklen,int leaf)893 xfs_bmbt_maxrecs(
894 	struct xfs_mount	*mp,
895 	int			blocklen,
896 	int			leaf)
897 {
898 	blocklen -= XFS_BMBT_BLOCK_LEN(mp);
899 
900 	if (leaf)
901 		return blocklen / sizeof(xfs_bmbt_rec_t);
902 	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
903 }
904 
905 /*
906  * Calculate number of records in a bmap btree inode root.
907  */
908 int
xfs_bmdr_maxrecs(struct xfs_mount * mp,int blocklen,int leaf)909 xfs_bmdr_maxrecs(
910 	struct xfs_mount	*mp,
911 	int			blocklen,
912 	int			leaf)
913 {
914 	blocklen -= sizeof(xfs_bmdr_block_t);
915 
916 	if (leaf)
917 		return blocklen / sizeof(xfs_bmdr_rec_t);
918 	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
919 }
920