/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"

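/*
 * Duplicate an allocation btree cursor: build a new cursor against the
 * same AG, AGF buffer and btree type as the existing one.
 */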
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

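/*
 * Set the root block of the btree in the AGF, adjust the recorded level
 * count by "inc", keep the cached per-AG level in sync, and log the
 * changed AGF fields.
 */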
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

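/*
 * Allocate a new block for the btree from the AG freelist.  Returns with
 * *stat cleared if the freelist is empty.
 */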
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_alloc_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

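/*
 * Free a btree block back onto the AG freelist, marking the extent busy
 * so it cannot be reused before the transaction that freed it commits.
 */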
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_ALLOC_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

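/* Return the minimum number of records for a leaf (level 0) or node block. */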
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

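/* Return the maximum number of records for a leaf (level 0) or node block. */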
STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

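/* Construct a btree key from the on-disk fields of a record. */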
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

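/* Construct a btree record from the on-disk fields of a key. */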
STATIC void
xfs_allocbt_init_rec_from_key(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(key->alloc.ar_startblock != 0);

	rec->alloc.ar_startblock = key->alloc.ar_startblock;
	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}

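/* Construct an on-disk record from the incore record held in the cursor. */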
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	ASSERT(cur->bc_rec.a.ar_startblock != 0);

	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

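/* Return the root block pointer of this btree, as recorded in the AGF. */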
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

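/*
 * Compare the given key against the record in the cursor.  The by-bno
 * tree orders on startblock alone; the by-size tree orders on blockcount
 * first and startblock second.
 */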
STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

#ifdef DEBUG
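/*
 * Debug-only check that two adjacent keys are in the proper sort order
 * for this btree type.
 */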
STATIC int
xfs_allocbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(k1->alloc.ar_startblock) <
		       be32_to_cpu(k2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(k1->alloc.ar_blockcount) <
			be32_to_cpu(k2->alloc.ar_blockcount) ||
			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
			 be32_to_cpu(k1->alloc.ar_startblock) <
			 be32_to_cpu(k2->alloc.ar_startblock));
	}
}

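/*
 * Debug-only check that two adjacent records are in the proper sort order
 * for this btree type (and do not overlap in the by-bno tree).
 */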
STATIC int
xfs_allocbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return be32_to_cpu(r1->alloc.ar_startblock) +
			be32_to_cpu(r1->alloc.ar_blockcount) <=
			be32_to_cpu(r2->alloc.ar_startblock);
	} else {
		return be32_to_cpu(r1->alloc.ar_blockcount) <
			be32_to_cpu(r2->alloc.ar_blockcount) ||
			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
			 be32_to_cpu(r1->alloc.ar_startblock) <
			 be32_to_cpu(r2->alloc.ar_startblock));
	}
}
#endif	/* DEBUG */

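/*
 * Operations vector used by both the by-bno and by-size allocation btrees.
 */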
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
#ifdef DEBUG
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}